fc.c 78 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
2944294529462947
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2016 Avago Technologies. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  6. #include <linux/module.h>
  7. #include <linux/slab.h>
  8. #include <linux/blk-mq.h>
  9. #include <linux/parser.h>
  10. #include <linux/random.h>
  11. #include <uapi/scsi/fc/fc_fs.h>
  12. #include <uapi/scsi/fc/fc_els.h>
  13. #include "nvmet.h"
  14. #include <linux/nvme-fc-driver.h>
  15. #include <linux/nvme-fc.h>
  16. #include "../host/fc.h"
  17. /* *************************** Data Structures/Defines ****************** */
  18. #define NVMET_LS_CTX_COUNT 256
  19. struct nvmet_fc_tgtport;
  20. struct nvmet_fc_tgt_assoc;
  21. struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
  22. struct nvmefc_ls_rsp *lsrsp;
  23. struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
  24. struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
  25. struct nvmet_fc_tgtport *tgtport;
  26. struct nvmet_fc_tgt_assoc *assoc;
  27. void *hosthandle;
  28. union nvmefc_ls_requests *rqstbuf;
  29. union nvmefc_ls_responses *rspbuf;
  30. u16 rqstdatalen;
  31. dma_addr_t rspdma;
  32. struct scatterlist sg[2];
  33. struct work_struct work;
  34. } __aligned(sizeof(unsigned long long));
  35. struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
  36. struct nvmefc_ls_req ls_req;
  37. struct nvmet_fc_tgtport *tgtport;
  38. void *hosthandle;
  39. int ls_error;
  40. struct list_head lsreq_list; /* tgtport->ls_req_list */
  41. bool req_queued;
  42. };
  43. /* desired maximum for a single sequence - if sg list allows it */
  44. #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
  45. enum nvmet_fcp_datadir {
  46. NVMET_FCP_NODATA,
  47. NVMET_FCP_WRITE,
  48. NVMET_FCP_READ,
  49. NVMET_FCP_ABORTED,
  50. };
  51. struct nvmet_fc_fcp_iod {
  52. struct nvmefc_tgt_fcp_req *fcpreq;
  53. struct nvme_fc_cmd_iu cmdiubuf;
  54. struct nvme_fc_ersp_iu rspiubuf;
  55. dma_addr_t rspdma;
  56. struct scatterlist *next_sg;
  57. struct scatterlist *data_sg;
  58. int data_sg_cnt;
  59. u32 offset;
  60. enum nvmet_fcp_datadir io_dir;
  61. bool active;
  62. bool abort;
  63. bool aborted;
  64. bool writedataactive;
  65. spinlock_t flock;
  66. struct nvmet_req req;
  67. struct work_struct defer_work;
  68. struct nvmet_fc_tgtport *tgtport;
  69. struct nvmet_fc_tgt_queue *queue;
  70. struct list_head fcp_list; /* tgtport->fcp_list */
  71. };
  72. struct nvmet_fc_tgtport {
  73. struct nvmet_fc_target_port fc_target_port;
  74. struct list_head tgt_list; /* nvmet_fc_target_list */
  75. struct device *dev; /* dev for dma mapping */
  76. struct nvmet_fc_target_template *ops;
  77. struct nvmet_fc_ls_iod *iod;
  78. spinlock_t lock;
  79. struct list_head ls_rcv_list;
  80. struct list_head ls_req_list;
  81. struct list_head ls_busylist;
  82. struct list_head assoc_list;
  83. struct list_head host_list;
  84. struct ida assoc_cnt;
  85. struct nvmet_fc_port_entry *pe;
  86. struct kref ref;
  87. u32 max_sg_cnt;
  88. };
  89. struct nvmet_fc_port_entry {
  90. struct nvmet_fc_tgtport *tgtport;
  91. struct nvmet_port *port;
  92. u64 node_name;
  93. u64 port_name;
  94. struct list_head pe_list;
  95. };
  96. struct nvmet_fc_defer_fcp_req {
  97. struct list_head req_list;
  98. struct nvmefc_tgt_fcp_req *fcp_req;
  99. };
  100. struct nvmet_fc_tgt_queue {
  101. bool ninetypercent;
  102. u16 qid;
  103. u16 sqsize;
  104. u16 ersp_ratio;
  105. __le16 sqhd;
  106. atomic_t connected;
  107. atomic_t sqtail;
  108. atomic_t zrspcnt;
  109. atomic_t rsn;
  110. spinlock_t qlock;
  111. struct nvmet_cq nvme_cq;
  112. struct nvmet_sq nvme_sq;
  113. struct nvmet_fc_tgt_assoc *assoc;
  114. struct list_head fod_list;
  115. struct list_head pending_cmd_list;
  116. struct list_head avail_defer_list;
  117. struct workqueue_struct *work_q;
  118. struct kref ref;
  119. struct rcu_head rcu;
  120. struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
  121. } __aligned(sizeof(unsigned long long));
  122. struct nvmet_fc_hostport {
  123. struct nvmet_fc_tgtport *tgtport;
  124. void *hosthandle;
  125. struct list_head host_list;
  126. struct kref ref;
  127. u8 invalid;
  128. };
  129. struct nvmet_fc_tgt_assoc {
  130. u64 association_id;
  131. u32 a_id;
  132. atomic_t terminating;
  133. struct nvmet_fc_tgtport *tgtport;
  134. struct nvmet_fc_hostport *hostport;
  135. struct nvmet_fc_ls_iod *rcv_disconn;
  136. struct list_head a_list;
  137. struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1];
  138. struct kref ref;
  139. struct work_struct del_work;
  140. struct rcu_head rcu;
  141. };
  142. static inline int
  143. nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
  144. {
  145. return (iodptr - iodptr->tgtport->iod);
  146. }
  147. static inline int
  148. nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
  149. {
  150. return (fodptr - fodptr->queue->fod);
  151. }
  152. /*
  153. * Association and Connection IDs:
  154. *
  155. * Association ID will have random number in upper 6 bytes and zero
  156. * in lower 2 bytes
  157. *
  158. * Connection IDs will be Association ID with QID or'd in lower 2 bytes
  159. *
  160. * note: Association ID = Connection ID for queue 0
  161. */
  162. #define BYTES_FOR_QID sizeof(u16)
  163. #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
  164. #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
  165. static inline u64
  166. nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
  167. {
  168. return (assoc->association_id | qid);
  169. }
  170. static inline u64
  171. nvmet_fc_getassociationid(u64 connectionid)
  172. {
  173. return connectionid & ~NVMET_FC_QUEUEID_MASK;
  174. }
  175. static inline u16
  176. nvmet_fc_getqueueid(u64 connectionid)
  177. {
  178. return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
  179. }
  180. static inline struct nvmet_fc_tgtport *
  181. targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
  182. {
  183. return container_of(targetport, struct nvmet_fc_tgtport,
  184. fc_target_port);
  185. }
  186. static inline struct nvmet_fc_fcp_iod *
  187. nvmet_req_to_fod(struct nvmet_req *nvme_req)
  188. {
  189. return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
  190. }
  191. /* *************************** Globals **************************** */
  192. static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
  193. static LIST_HEAD(nvmet_fc_target_list);
  194. static DEFINE_IDA(nvmet_fc_tgtport_cnt);
  195. static LIST_HEAD(nvmet_fc_portentry_list);
  196. static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
  197. static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
  198. static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
  199. static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
  200. static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
  201. static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
  202. static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
  203. static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
  204. static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
  205. struct nvmet_fc_fcp_iod *fod);
  206. static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
  207. static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
  208. struct nvmet_fc_ls_iod *iod);
  209. /* *********************** FC-NVME DMA Handling **************************** */
  210. /*
  211. * The fcloop device passes in a NULL device pointer. Real LLD's will
  212. * pass in a valid device pointer. If NULL is passed to the dma mapping
  213. * routines, depending on the platform, it may or may not succeed, and
  214. * may crash.
  215. *
  216. * As such:
  217. * Wrapper all the dma routines and check the dev pointer.
  218. *
  219. * If simple mappings (return just a dma address, we'll noop them,
  220. * returning a dma address of 0.
  221. *
  222. * On more complex mappings (dma_map_sg), a pseudo routine fills
  223. * in the scatter list, setting all dma addresses to 0.
  224. */
  225. static inline dma_addr_t
  226. fc_dma_map_single(struct device *dev, void *ptr, size_t size,
  227. enum dma_data_direction dir)
  228. {
  229. return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
  230. }
  231. static inline int
  232. fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  233. {
  234. return dev ? dma_mapping_error(dev, dma_addr) : 0;
  235. }
  236. static inline void
  237. fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
  238. enum dma_data_direction dir)
  239. {
  240. if (dev)
  241. dma_unmap_single(dev, addr, size, dir);
  242. }
  243. static inline void
  244. fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
  245. enum dma_data_direction dir)
  246. {
  247. if (dev)
  248. dma_sync_single_for_cpu(dev, addr, size, dir);
  249. }
  250. static inline void
  251. fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
  252. enum dma_data_direction dir)
  253. {
  254. if (dev)
  255. dma_sync_single_for_device(dev, addr, size, dir);
  256. }
  257. /* pseudo dma_map_sg call */
  258. static int
  259. fc_map_sg(struct scatterlist *sg, int nents)
  260. {
  261. struct scatterlist *s;
  262. int i;
  263. WARN_ON(nents == 0 || sg[0].length == 0);
  264. for_each_sg(sg, s, nents, i) {
  265. s->dma_address = 0L;
  266. #ifdef CONFIG_NEED_SG_DMA_LENGTH
  267. s->dma_length = s->length;
  268. #endif
  269. }
  270. return nents;
  271. }
  272. static inline int
  273. fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  274. enum dma_data_direction dir)
  275. {
  276. return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
  277. }
  278. static inline void
  279. fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
  280. enum dma_data_direction dir)
  281. {
  282. if (dev)
  283. dma_unmap_sg(dev, sg, nents, dir);
  284. }
  285. /* ********************** FC-NVME LS XMT Handling ************************* */
  286. static void
  287. __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
  288. {
  289. struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
  290. struct nvmefc_ls_req *lsreq = &lsop->ls_req;
  291. unsigned long flags;
  292. spin_lock_irqsave(&tgtport->lock, flags);
  293. if (!lsop->req_queued) {
  294. spin_unlock_irqrestore(&tgtport->lock, flags);
  295. return;
  296. }
  297. list_del(&lsop->lsreq_list);
  298. lsop->req_queued = false;
  299. spin_unlock_irqrestore(&tgtport->lock, flags);
  300. fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
  301. (lsreq->rqstlen + lsreq->rsplen),
  302. DMA_BIDIRECTIONAL);
  303. nvmet_fc_tgtport_put(tgtport);
  304. }
  305. static int
  306. __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
  307. struct nvmet_fc_ls_req_op *lsop,
  308. void (*done)(struct nvmefc_ls_req *req, int status))
  309. {
  310. struct nvmefc_ls_req *lsreq = &lsop->ls_req;
  311. unsigned long flags;
  312. int ret = 0;
  313. if (!tgtport->ops->ls_req)
  314. return -EOPNOTSUPP;
  315. if (!nvmet_fc_tgtport_get(tgtport))
  316. return -ESHUTDOWN;
  317. lsreq->done = done;
  318. lsop->req_queued = false;
  319. INIT_LIST_HEAD(&lsop->lsreq_list);
  320. lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
  321. lsreq->rqstlen + lsreq->rsplen,
  322. DMA_BIDIRECTIONAL);
  323. if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
  324. ret = -EFAULT;
  325. goto out_puttgtport;
  326. }
  327. lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
  328. spin_lock_irqsave(&tgtport->lock, flags);
  329. list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
  330. lsop->req_queued = true;
  331. spin_unlock_irqrestore(&tgtport->lock, flags);
  332. ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
  333. lsreq);
  334. if (ret)
  335. goto out_unlink;
  336. return 0;
  337. out_unlink:
  338. lsop->ls_error = ret;
  339. spin_lock_irqsave(&tgtport->lock, flags);
  340. lsop->req_queued = false;
  341. list_del(&lsop->lsreq_list);
  342. spin_unlock_irqrestore(&tgtport->lock, flags);
  343. fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
  344. (lsreq->rqstlen + lsreq->rsplen),
  345. DMA_BIDIRECTIONAL);
  346. out_puttgtport:
  347. nvmet_fc_tgtport_put(tgtport);
  348. return ret;
  349. }
  350. static int
  351. nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
  352. struct nvmet_fc_ls_req_op *lsop,
  353. void (*done)(struct nvmefc_ls_req *req, int status))
  354. {
  355. /* don't wait for completion */
  356. return __nvmet_fc_send_ls_req(tgtport, lsop, done);
  357. }
  358. static void
  359. nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
  360. {
  361. struct nvmet_fc_ls_req_op *lsop =
  362. container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
  363. __nvmet_fc_finish_ls_req(lsop);
  364. /* fc-nvme target doesn't care about success or failure of cmd */
  365. kfree(lsop);
  366. }
  367. /*
  368. * This routine sends a FC-NVME LS to disconnect (aka terminate)
  369. * the FC-NVME Association. Terminating the association also
  370. * terminates the FC-NVME connections (per queue, both admin and io
  371. * queues) that are part of the association. E.g. things are torn
  372. * down, and the related FC-NVME Association ID and Connection IDs
  373. * become invalid.
  374. *
  375. * The behavior of the fc-nvme target is such that it's
  376. * understanding of the association and connections will implicitly
  377. * be torn down. The action is implicit as it may be due to a loss of
  378. * connectivity with the fc-nvme host, so the target may never get a
  379. * response even if it tried. As such, the action of this routine
  380. * is to asynchronously send the LS, ignore any results of the LS, and
  381. * continue on with terminating the association. If the fc-nvme host
  382. * is present and receives the LS, it too can tear down.
  383. */
  384. static void
  385. nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
  386. {
  387. struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
  388. struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
  389. struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
  390. struct nvmet_fc_ls_req_op *lsop;
  391. struct nvmefc_ls_req *lsreq;
  392. int ret;
  393. /*
  394. * If ls_req is NULL or no hosthandle, it's an older lldd and no
  395. * message is normal. Otherwise, send unless the hostport has
  396. * already been invalidated by the lldd.
  397. */
  398. if (!tgtport->ops->ls_req || !assoc->hostport ||
  399. assoc->hostport->invalid)
  400. return;
  401. lsop = kzalloc((sizeof(*lsop) +
  402. sizeof(*discon_rqst) + sizeof(*discon_acc) +
  403. tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
  404. if (!lsop) {
  405. dev_info(tgtport->dev,
  406. "{%d:%d} send Disconnect Association failed: ENOMEM\n",
  407. tgtport->fc_target_port.port_num, assoc->a_id);
  408. return;
  409. }
  410. discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
  411. discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
  412. lsreq = &lsop->ls_req;
  413. if (tgtport->ops->lsrqst_priv_sz)
  414. lsreq->private = (void *)&discon_acc[1];
  415. else
  416. lsreq->private = NULL;
  417. lsop->tgtport = tgtport;
  418. lsop->hosthandle = assoc->hostport->hosthandle;
  419. nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
  420. assoc->association_id);
  421. ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
  422. nvmet_fc_disconnect_assoc_done);
  423. if (ret) {
  424. dev_info(tgtport->dev,
  425. "{%d:%d} XMT Disconnect Association failed: %d\n",
  426. tgtport->fc_target_port.port_num, assoc->a_id, ret);
  427. kfree(lsop);
  428. }
  429. }
  430. /* *********************** FC-NVME Port Management ************************ */
  431. static int
  432. nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
  433. {
  434. struct nvmet_fc_ls_iod *iod;
  435. int i;
  436. iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
  437. GFP_KERNEL);
  438. if (!iod)
  439. return -ENOMEM;
  440. tgtport->iod = iod;
  441. for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
  442. INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
  443. iod->tgtport = tgtport;
  444. list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
  445. iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
  446. sizeof(union nvmefc_ls_responses),
  447. GFP_KERNEL);
  448. if (!iod->rqstbuf)
  449. goto out_fail;
  450. iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
  451. iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
  452. sizeof(*iod->rspbuf),
  453. DMA_TO_DEVICE);
  454. if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
  455. goto out_fail;
  456. }
  457. return 0;
  458. out_fail:
  459. kfree(iod->rqstbuf);
  460. list_del(&iod->ls_rcv_list);
  461. for (iod--, i--; i >= 0; iod--, i--) {
  462. fc_dma_unmap_single(tgtport->dev, iod->rspdma,
  463. sizeof(*iod->rspbuf), DMA_TO_DEVICE);
  464. kfree(iod->rqstbuf);
  465. list_del(&iod->ls_rcv_list);
  466. }
  467. kfree(iod);
  468. return -EFAULT;
  469. }
  470. static void
  471. nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
  472. {
  473. struct nvmet_fc_ls_iod *iod = tgtport->iod;
  474. int i;
  475. for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
  476. fc_dma_unmap_single(tgtport->dev,
  477. iod->rspdma, sizeof(*iod->rspbuf),
  478. DMA_TO_DEVICE);
  479. kfree(iod->rqstbuf);
  480. list_del(&iod->ls_rcv_list);
  481. }
  482. kfree(tgtport->iod);
  483. }
  484. static struct nvmet_fc_ls_iod *
  485. nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
  486. {
  487. struct nvmet_fc_ls_iod *iod;
  488. unsigned long flags;
  489. spin_lock_irqsave(&tgtport->lock, flags);
  490. iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
  491. struct nvmet_fc_ls_iod, ls_rcv_list);
  492. if (iod)
  493. list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
  494. spin_unlock_irqrestore(&tgtport->lock, flags);
  495. return iod;
  496. }
  497. static void
  498. nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
  499. struct nvmet_fc_ls_iod *iod)
  500. {
  501. unsigned long flags;
  502. spin_lock_irqsave(&tgtport->lock, flags);
  503. list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
  504. spin_unlock_irqrestore(&tgtport->lock, flags);
  505. }
  506. static void
  507. nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
  508. struct nvmet_fc_tgt_queue *queue)
  509. {
  510. struct nvmet_fc_fcp_iod *fod = queue->fod;
  511. int i;
  512. for (i = 0; i < queue->sqsize; fod++, i++) {
  513. INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
  514. fod->tgtport = tgtport;
  515. fod->queue = queue;
  516. fod->active = false;
  517. fod->abort = false;
  518. fod->aborted = false;
  519. fod->fcpreq = NULL;
  520. list_add_tail(&fod->fcp_list, &queue->fod_list);
  521. spin_lock_init(&fod->flock);
  522. fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
  523. sizeof(fod->rspiubuf), DMA_TO_DEVICE);
  524. if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
  525. list_del(&fod->fcp_list);
  526. for (fod--, i--; i >= 0; fod--, i--) {
  527. fc_dma_unmap_single(tgtport->dev, fod->rspdma,
  528. sizeof(fod->rspiubuf),
  529. DMA_TO_DEVICE);
  530. fod->rspdma = 0L;
  531. list_del(&fod->fcp_list);
  532. }
  533. return;
  534. }
  535. }
  536. }
  537. static void
  538. nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
  539. struct nvmet_fc_tgt_queue *queue)
  540. {
  541. struct nvmet_fc_fcp_iod *fod = queue->fod;
  542. int i;
  543. for (i = 0; i < queue->sqsize; fod++, i++) {
  544. if (fod->rspdma)
  545. fc_dma_unmap_single(tgtport->dev, fod->rspdma,
  546. sizeof(fod->rspiubuf), DMA_TO_DEVICE);
  547. }
  548. }
  549. static struct nvmet_fc_fcp_iod *
  550. nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
  551. {
  552. struct nvmet_fc_fcp_iod *fod;
  553. lockdep_assert_held(&queue->qlock);
  554. fod = list_first_entry_or_null(&queue->fod_list,
  555. struct nvmet_fc_fcp_iod, fcp_list);
  556. if (fod) {
  557. list_del(&fod->fcp_list);
  558. fod->active = true;
  559. /*
  560. * no queue reference is taken, as it was taken by the
  561. * queue lookup just prior to the allocation. The iod
  562. * will "inherit" that reference.
  563. */
  564. }
  565. return fod;
  566. }
  567. static void
  568. nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
  569. struct nvmet_fc_tgt_queue *queue,
  570. struct nvmefc_tgt_fcp_req *fcpreq)
  571. {
  572. struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
  573. /*
  574. * put all admin cmds on hw queue id 0. All io commands go to
  575. * the respective hw queue based on a modulo basis
  576. */
  577. fcpreq->hwqid = queue->qid ?
  578. ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
  579. nvmet_fc_handle_fcp_rqst(tgtport, fod);
  580. }
  581. static void
  582. nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
  583. {
  584. struct nvmet_fc_fcp_iod *fod =
  585. container_of(work, struct nvmet_fc_fcp_iod, defer_work);
  586. /* Submit deferred IO for processing */
  587. nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
  588. }
  589. static void
  590. nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
  591. struct nvmet_fc_fcp_iod *fod)
  592. {
  593. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  594. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  595. struct nvmet_fc_defer_fcp_req *deferfcp;
  596. unsigned long flags;
  597. fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
  598. sizeof(fod->rspiubuf), DMA_TO_DEVICE);
  599. fcpreq->nvmet_fc_private = NULL;
  600. fod->active = false;
  601. fod->abort = false;
  602. fod->aborted = false;
  603. fod->writedataactive = false;
  604. fod->fcpreq = NULL;
  605. tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
  606. /* release the queue lookup reference on the completed IO */
  607. nvmet_fc_tgt_q_put(queue);
  608. spin_lock_irqsave(&queue->qlock, flags);
  609. deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
  610. struct nvmet_fc_defer_fcp_req, req_list);
  611. if (!deferfcp) {
  612. list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
  613. spin_unlock_irqrestore(&queue->qlock, flags);
  614. return;
  615. }
  616. /* Re-use the fod for the next pending cmd that was deferred */
  617. list_del(&deferfcp->req_list);
  618. fcpreq = deferfcp->fcp_req;
  619. /* deferfcp can be reused for another IO at a later date */
  620. list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
  621. spin_unlock_irqrestore(&queue->qlock, flags);
  622. /* Save NVME CMD IO in fod */
  623. memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
  624. /* Setup new fcpreq to be processed */
  625. fcpreq->rspaddr = NULL;
  626. fcpreq->rsplen = 0;
  627. fcpreq->nvmet_fc_private = fod;
  628. fod->fcpreq = fcpreq;
  629. fod->active = true;
  630. /* inform LLDD IO is now being processed */
  631. tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
  632. /*
  633. * Leave the queue lookup get reference taken when
  634. * fod was originally allocated.
  635. */
  636. queue_work(queue->work_q, &fod->defer_work);
  637. }
  638. static struct nvmet_fc_tgt_queue *
  639. nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
  640. u16 qid, u16 sqsize)
  641. {
  642. struct nvmet_fc_tgt_queue *queue;
  643. int ret;
  644. if (qid > NVMET_NR_QUEUES)
  645. return NULL;
  646. queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
  647. if (!queue)
  648. return NULL;
  649. if (!nvmet_fc_tgt_a_get(assoc))
  650. goto out_free_queue;
  651. queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
  652. assoc->tgtport->fc_target_port.port_num,
  653. assoc->a_id, qid);
  654. if (!queue->work_q)
  655. goto out_a_put;
  656. queue->qid = qid;
  657. queue->sqsize = sqsize;
  658. queue->assoc = assoc;
  659. INIT_LIST_HEAD(&queue->fod_list);
  660. INIT_LIST_HEAD(&queue->avail_defer_list);
  661. INIT_LIST_HEAD(&queue->pending_cmd_list);
  662. atomic_set(&queue->connected, 0);
  663. atomic_set(&queue->sqtail, 0);
  664. atomic_set(&queue->rsn, 1);
  665. atomic_set(&queue->zrspcnt, 0);
  666. spin_lock_init(&queue->qlock);
  667. kref_init(&queue->ref);
  668. nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
  669. ret = nvmet_sq_init(&queue->nvme_sq);
  670. if (ret)
  671. goto out_fail_iodlist;
  672. WARN_ON(assoc->queues[qid]);
  673. rcu_assign_pointer(assoc->queues[qid], queue);
  674. return queue;
  675. out_fail_iodlist:
  676. nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
  677. destroy_workqueue(queue->work_q);
  678. out_a_put:
  679. nvmet_fc_tgt_a_put(assoc);
  680. out_free_queue:
  681. kfree(queue);
  682. return NULL;
  683. }
  684. static void
  685. nvmet_fc_tgt_queue_free(struct kref *ref)
  686. {
  687. struct nvmet_fc_tgt_queue *queue =
  688. container_of(ref, struct nvmet_fc_tgt_queue, ref);
  689. rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL);
  690. nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
  691. nvmet_fc_tgt_a_put(queue->assoc);
  692. destroy_workqueue(queue->work_q);
  693. kfree_rcu(queue, rcu);
  694. }
  695. static void
  696. nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
  697. {
  698. kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
  699. }
  700. static int
  701. nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
  702. {
  703. return kref_get_unless_zero(&queue->ref);
  704. }
  705. static void
  706. nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
  707. {
  708. struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
  709. struct nvmet_fc_fcp_iod *fod = queue->fod;
  710. struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
  711. unsigned long flags;
  712. int i;
  713. bool disconnect;
  714. disconnect = atomic_xchg(&queue->connected, 0);
  715. /* if not connected, nothing to do */
  716. if (!disconnect)
  717. return;
  718. spin_lock_irqsave(&queue->qlock, flags);
  719. /* abort outstanding io's */
  720. for (i = 0; i < queue->sqsize; fod++, i++) {
  721. if (fod->active) {
  722. spin_lock(&fod->flock);
  723. fod->abort = true;
  724. /*
  725. * only call lldd abort routine if waiting for
  726. * writedata. other outstanding ops should finish
  727. * on their own.
  728. */
  729. if (fod->writedataactive) {
  730. fod->aborted = true;
  731. spin_unlock(&fod->flock);
  732. tgtport->ops->fcp_abort(
  733. &tgtport->fc_target_port, fod->fcpreq);
  734. } else
  735. spin_unlock(&fod->flock);
  736. }
  737. }
  738. /* Cleanup defer'ed IOs in queue */
  739. list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
  740. req_list) {
  741. list_del(&deferfcp->req_list);
  742. kfree(deferfcp);
  743. }
  744. for (;;) {
  745. deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
  746. struct nvmet_fc_defer_fcp_req, req_list);
  747. if (!deferfcp)
  748. break;
  749. list_del(&deferfcp->req_list);
  750. spin_unlock_irqrestore(&queue->qlock, flags);
  751. tgtport->ops->defer_rcv(&tgtport->fc_target_port,
  752. deferfcp->fcp_req);
  753. tgtport->ops->fcp_abort(&tgtport->fc_target_port,
  754. deferfcp->fcp_req);
  755. tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
  756. deferfcp->fcp_req);
  757. /* release the queue lookup reference */
  758. nvmet_fc_tgt_q_put(queue);
  759. kfree(deferfcp);
  760. spin_lock_irqsave(&queue->qlock, flags);
  761. }
  762. spin_unlock_irqrestore(&queue->qlock, flags);
  763. flush_workqueue(queue->work_q);
  764. nvmet_sq_destroy(&queue->nvme_sq);
  765. nvmet_fc_tgt_q_put(queue);
  766. }
  767. static struct nvmet_fc_tgt_queue *
  768. nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
  769. u64 connection_id)
  770. {
  771. struct nvmet_fc_tgt_assoc *assoc;
  772. struct nvmet_fc_tgt_queue *queue;
  773. u64 association_id = nvmet_fc_getassociationid(connection_id);
  774. u16 qid = nvmet_fc_getqueueid(connection_id);
  775. if (qid > NVMET_NR_QUEUES)
  776. return NULL;
  777. rcu_read_lock();
  778. list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
  779. if (association_id == assoc->association_id) {
  780. queue = rcu_dereference(assoc->queues[qid]);
  781. if (queue &&
  782. (!atomic_read(&queue->connected) ||
  783. !nvmet_fc_tgt_q_get(queue)))
  784. queue = NULL;
  785. rcu_read_unlock();
  786. return queue;
  787. }
  788. }
  789. rcu_read_unlock();
  790. return NULL;
  791. }
  792. static void
  793. nvmet_fc_hostport_free(struct kref *ref)
  794. {
  795. struct nvmet_fc_hostport *hostport =
  796. container_of(ref, struct nvmet_fc_hostport, ref);
  797. struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
  798. unsigned long flags;
  799. spin_lock_irqsave(&tgtport->lock, flags);
  800. list_del(&hostport->host_list);
  801. spin_unlock_irqrestore(&tgtport->lock, flags);
  802. if (tgtport->ops->host_release && hostport->invalid)
  803. tgtport->ops->host_release(hostport->hosthandle);
  804. kfree(hostport);
  805. nvmet_fc_tgtport_put(tgtport);
  806. }
  807. static void
  808. nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
  809. {
  810. kref_put(&hostport->ref, nvmet_fc_hostport_free);
  811. }
  812. static int
  813. nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
  814. {
  815. return kref_get_unless_zero(&hostport->ref);
  816. }
  817. static void
  818. nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
  819. {
  820. /* if LLDD not implemented, leave as NULL */
  821. if (!hostport || !hostport->hosthandle)
  822. return;
  823. nvmet_fc_hostport_put(hostport);
  824. }
  825. static struct nvmet_fc_hostport *
  826. nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
  827. {
  828. struct nvmet_fc_hostport *host;
  829. lockdep_assert_held(&tgtport->lock);
  830. list_for_each_entry(host, &tgtport->host_list, host_list) {
  831. if (host->hosthandle == hosthandle && !host->invalid) {
  832. if (nvmet_fc_hostport_get(host))
  833. return (host);
  834. }
  835. }
  836. return NULL;
  837. }
  838. static struct nvmet_fc_hostport *
  839. nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
  840. {
  841. struct nvmet_fc_hostport *newhost, *match = NULL;
  842. unsigned long flags;
  843. /* if LLDD not implemented, leave as NULL */
  844. if (!hosthandle)
  845. return NULL;
  846. /*
  847. * take reference for what will be the newly allocated hostport if
  848. * we end up using a new allocation
  849. */
  850. if (!nvmet_fc_tgtport_get(tgtport))
  851. return ERR_PTR(-EINVAL);
  852. spin_lock_irqsave(&tgtport->lock, flags);
  853. match = nvmet_fc_match_hostport(tgtport, hosthandle);
  854. spin_unlock_irqrestore(&tgtport->lock, flags);
  855. if (match) {
  856. /* no new allocation - release reference */
  857. nvmet_fc_tgtport_put(tgtport);
  858. return match;
  859. }
  860. newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
  861. if (!newhost) {
  862. /* no new allocation - release reference */
  863. nvmet_fc_tgtport_put(tgtport);
  864. return ERR_PTR(-ENOMEM);
  865. }
  866. spin_lock_irqsave(&tgtport->lock, flags);
  867. match = nvmet_fc_match_hostport(tgtport, hosthandle);
  868. if (match) {
  869. /* new allocation not needed */
  870. kfree(newhost);
  871. newhost = match;
  872. /* no new allocation - release reference */
  873. nvmet_fc_tgtport_put(tgtport);
  874. } else {
  875. newhost->tgtport = tgtport;
  876. newhost->hosthandle = hosthandle;
  877. INIT_LIST_HEAD(&newhost->host_list);
  878. kref_init(&newhost->ref);
  879. list_add_tail(&newhost->host_list, &tgtport->host_list);
  880. }
  881. spin_unlock_irqrestore(&tgtport->lock, flags);
  882. return newhost;
  883. }
  884. static void
  885. nvmet_fc_delete_assoc(struct work_struct *work)
  886. {
  887. struct nvmet_fc_tgt_assoc *assoc =
  888. container_of(work, struct nvmet_fc_tgt_assoc, del_work);
  889. nvmet_fc_delete_target_assoc(assoc);
  890. nvmet_fc_tgt_a_put(assoc);
  891. }
  892. static struct nvmet_fc_tgt_assoc *
  893. nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
  894. {
  895. struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
  896. unsigned long flags;
  897. u64 ran;
  898. int idx;
  899. bool needrandom = true;
  900. assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
  901. if (!assoc)
  902. return NULL;
  903. idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
  904. if (idx < 0)
  905. goto out_free_assoc;
  906. if (!nvmet_fc_tgtport_get(tgtport))
  907. goto out_ida;
  908. assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
  909. if (IS_ERR(assoc->hostport))
  910. goto out_put;
  911. assoc->tgtport = tgtport;
  912. assoc->a_id = idx;
  913. INIT_LIST_HEAD(&assoc->a_list);
  914. kref_init(&assoc->ref);
  915. INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
  916. atomic_set(&assoc->terminating, 0);
  917. while (needrandom) {
  918. get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
  919. ran = ran << BYTES_FOR_QID_SHIFT;
  920. spin_lock_irqsave(&tgtport->lock, flags);
  921. needrandom = false;
  922. list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
  923. if (ran == tmpassoc->association_id) {
  924. needrandom = true;
  925. break;
  926. }
  927. }
  928. if (!needrandom) {
  929. assoc->association_id = ran;
  930. list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
  931. }
  932. spin_unlock_irqrestore(&tgtport->lock, flags);
  933. }
  934. return assoc;
  935. out_put:
  936. nvmet_fc_tgtport_put(tgtport);
  937. out_ida:
  938. ida_free(&tgtport->assoc_cnt, idx);
  939. out_free_assoc:
  940. kfree(assoc);
  941. return NULL;
  942. }
  943. static void
  944. nvmet_fc_target_assoc_free(struct kref *ref)
  945. {
  946. struct nvmet_fc_tgt_assoc *assoc =
  947. container_of(ref, struct nvmet_fc_tgt_assoc, ref);
  948. struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
  949. struct nvmet_fc_ls_iod *oldls;
  950. unsigned long flags;
  951. /* Send Disconnect now that all i/o has completed */
  952. nvmet_fc_xmt_disconnect_assoc(assoc);
  953. nvmet_fc_free_hostport(assoc->hostport);
  954. spin_lock_irqsave(&tgtport->lock, flags);
  955. list_del_rcu(&assoc->a_list);
  956. oldls = assoc->rcv_disconn;
  957. spin_unlock_irqrestore(&tgtport->lock, flags);
  958. /* if pending Rcv Disconnect Association LS, send rsp now */
  959. if (oldls)
  960. nvmet_fc_xmt_ls_rsp(tgtport, oldls);
  961. ida_free(&tgtport->assoc_cnt, assoc->a_id);
  962. dev_info(tgtport->dev,
  963. "{%d:%d} Association freed\n",
  964. tgtport->fc_target_port.port_num, assoc->a_id);
  965. kfree_rcu(assoc, rcu);
  966. nvmet_fc_tgtport_put(tgtport);
  967. }
  968. static void
  969. nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
  970. {
  971. kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
  972. }
  973. static int
  974. nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
  975. {
  976. return kref_get_unless_zero(&assoc->ref);
  977. }
  978. static void
  979. nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
  980. {
  981. struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
  982. struct nvmet_fc_tgt_queue *queue;
  983. int i, terminating;
  984. terminating = atomic_xchg(&assoc->terminating, 1);
  985. /* if already terminating, do nothing */
  986. if (terminating)
  987. return;
  988. for (i = NVMET_NR_QUEUES; i >= 0; i--) {
  989. rcu_read_lock();
  990. queue = rcu_dereference(assoc->queues[i]);
  991. if (!queue) {
  992. rcu_read_unlock();
  993. continue;
  994. }
  995. if (!nvmet_fc_tgt_q_get(queue)) {
  996. rcu_read_unlock();
  997. continue;
  998. }
  999. rcu_read_unlock();
  1000. nvmet_fc_delete_target_queue(queue);
  1001. nvmet_fc_tgt_q_put(queue);
  1002. }
  1003. dev_info(tgtport->dev,
  1004. "{%d:%d} Association deleted\n",
  1005. tgtport->fc_target_port.port_num, assoc->a_id);
  1006. nvmet_fc_tgt_a_put(assoc);
  1007. }
  1008. static struct nvmet_fc_tgt_assoc *
  1009. nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
  1010. u64 association_id)
  1011. {
  1012. struct nvmet_fc_tgt_assoc *assoc;
  1013. struct nvmet_fc_tgt_assoc *ret = NULL;
  1014. rcu_read_lock();
  1015. list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
  1016. if (association_id == assoc->association_id) {
  1017. ret = assoc;
  1018. if (!nvmet_fc_tgt_a_get(assoc))
  1019. ret = NULL;
  1020. break;
  1021. }
  1022. }
  1023. rcu_read_unlock();
  1024. return ret;
  1025. }
  1026. static void
  1027. nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
  1028. struct nvmet_fc_port_entry *pe,
  1029. struct nvmet_port *port)
  1030. {
  1031. lockdep_assert_held(&nvmet_fc_tgtlock);
  1032. pe->tgtport = tgtport;
  1033. tgtport->pe = pe;
  1034. pe->port = port;
  1035. port->priv = pe;
  1036. pe->node_name = tgtport->fc_target_port.node_name;
  1037. pe->port_name = tgtport->fc_target_port.port_name;
  1038. INIT_LIST_HEAD(&pe->pe_list);
  1039. list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
  1040. }
  1041. static void
  1042. nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
  1043. {
  1044. unsigned long flags;
  1045. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1046. if (pe->tgtport)
  1047. pe->tgtport->pe = NULL;
  1048. list_del(&pe->pe_list);
  1049. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1050. }
  1051. /*
  1052. * called when a targetport deregisters. Breaks the relationship
  1053. * with the nvmet port, but leaves the port_entry in place so that
  1054. * re-registration can resume operation.
  1055. */
  1056. static void
  1057. nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
  1058. {
  1059. struct nvmet_fc_port_entry *pe;
  1060. unsigned long flags;
  1061. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1062. pe = tgtport->pe;
  1063. if (pe)
  1064. pe->tgtport = NULL;
  1065. tgtport->pe = NULL;
  1066. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1067. }
  1068. /*
  1069. * called when a new targetport is registered. Looks in the
  1070. * existing nvmet port_entries to see if the nvmet layer is
  1071. * configured for the targetport's wwn's. (the targetport existed,
  1072. * nvmet configured, the lldd unregistered the tgtport, and is now
  1073. * reregistering the same targetport). If so, set the nvmet port
  1074. * port entry on the targetport.
  1075. */
  1076. static void
  1077. nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
  1078. {
  1079. struct nvmet_fc_port_entry *pe;
  1080. unsigned long flags;
  1081. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1082. list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
  1083. if (tgtport->fc_target_port.node_name == pe->node_name &&
  1084. tgtport->fc_target_port.port_name == pe->port_name) {
  1085. WARN_ON(pe->tgtport);
  1086. tgtport->pe = pe;
  1087. pe->tgtport = tgtport;
  1088. break;
  1089. }
  1090. }
  1091. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1092. }
  1093. /**
  1094. * nvmet_fc_register_targetport - transport entry point called by an
  1095. * LLDD to register the existence of a local
  1096. * NVME subystem FC port.
  1097. * @pinfo: pointer to information about the port to be registered
  1098. * @template: LLDD entrypoints and operational parameters for the port
  1099. * @dev: physical hardware device node port corresponds to. Will be
  1100. * used for DMA mappings
  1101. * @portptr: pointer to a local port pointer. Upon success, the routine
  1102. * will allocate a nvme_fc_local_port structure and place its
  1103. * address in the local port pointer. Upon failure, local port
  1104. * pointer will be set to NULL.
  1105. *
  1106. * Returns:
  1107. * a completion status. Must be 0 upon success; a negative errno
  1108. * (ex: -ENXIO) upon failure.
  1109. */
  1110. int
  1111. nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
  1112. struct nvmet_fc_target_template *template,
  1113. struct device *dev,
  1114. struct nvmet_fc_target_port **portptr)
  1115. {
  1116. struct nvmet_fc_tgtport *newrec;
  1117. unsigned long flags;
  1118. int ret, idx;
  1119. if (!template->xmt_ls_rsp || !template->fcp_op ||
  1120. !template->fcp_abort ||
  1121. !template->fcp_req_release || !template->targetport_delete ||
  1122. !template->max_hw_queues || !template->max_sgl_segments ||
  1123. !template->max_dif_sgl_segments || !template->dma_boundary) {
  1124. ret = -EINVAL;
  1125. goto out_regtgt_failed;
  1126. }
  1127. newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
  1128. GFP_KERNEL);
  1129. if (!newrec) {
  1130. ret = -ENOMEM;
  1131. goto out_regtgt_failed;
  1132. }
  1133. idx = ida_alloc(&nvmet_fc_tgtport_cnt, GFP_KERNEL);
  1134. if (idx < 0) {
  1135. ret = -ENOSPC;
  1136. goto out_fail_kfree;
  1137. }
  1138. if (!get_device(dev) && dev) {
  1139. ret = -ENODEV;
  1140. goto out_ida_put;
  1141. }
  1142. newrec->fc_target_port.node_name = pinfo->node_name;
  1143. newrec->fc_target_port.port_name = pinfo->port_name;
  1144. if (template->target_priv_sz)
  1145. newrec->fc_target_port.private = &newrec[1];
  1146. else
  1147. newrec->fc_target_port.private = NULL;
  1148. newrec->fc_target_port.port_id = pinfo->port_id;
  1149. newrec->fc_target_port.port_num = idx;
  1150. INIT_LIST_HEAD(&newrec->tgt_list);
  1151. newrec->dev = dev;
  1152. newrec->ops = template;
  1153. spin_lock_init(&newrec->lock);
  1154. INIT_LIST_HEAD(&newrec->ls_rcv_list);
  1155. INIT_LIST_HEAD(&newrec->ls_req_list);
  1156. INIT_LIST_HEAD(&newrec->ls_busylist);
  1157. INIT_LIST_HEAD(&newrec->assoc_list);
  1158. INIT_LIST_HEAD(&newrec->host_list);
  1159. kref_init(&newrec->ref);
  1160. ida_init(&newrec->assoc_cnt);
  1161. newrec->max_sg_cnt = template->max_sgl_segments;
  1162. ret = nvmet_fc_alloc_ls_iodlist(newrec);
  1163. if (ret) {
  1164. ret = -ENOMEM;
  1165. goto out_free_newrec;
  1166. }
  1167. nvmet_fc_portentry_rebind_tgt(newrec);
  1168. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1169. list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
  1170. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1171. *portptr = &newrec->fc_target_port;
  1172. return 0;
  1173. out_free_newrec:
  1174. put_device(dev);
  1175. out_ida_put:
  1176. ida_free(&nvmet_fc_tgtport_cnt, idx);
  1177. out_fail_kfree:
  1178. kfree(newrec);
  1179. out_regtgt_failed:
  1180. *portptr = NULL;
  1181. return ret;
  1182. }
  1183. EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
  1184. static void
  1185. nvmet_fc_free_tgtport(struct kref *ref)
  1186. {
  1187. struct nvmet_fc_tgtport *tgtport =
  1188. container_of(ref, struct nvmet_fc_tgtport, ref);
  1189. struct device *dev = tgtport->dev;
  1190. unsigned long flags;
  1191. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1192. list_del(&tgtport->tgt_list);
  1193. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1194. nvmet_fc_free_ls_iodlist(tgtport);
  1195. /* let the LLDD know we've finished tearing it down */
  1196. tgtport->ops->targetport_delete(&tgtport->fc_target_port);
  1197. ida_free(&nvmet_fc_tgtport_cnt,
  1198. tgtport->fc_target_port.port_num);
  1199. ida_destroy(&tgtport->assoc_cnt);
  1200. kfree(tgtport);
  1201. put_device(dev);
  1202. }
  1203. static void
  1204. nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
  1205. {
  1206. kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
  1207. }
  1208. static int
  1209. nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
  1210. {
  1211. return kref_get_unless_zero(&tgtport->ref);
  1212. }
  1213. static void
  1214. __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
  1215. {
  1216. struct nvmet_fc_tgt_assoc *assoc;
  1217. rcu_read_lock();
  1218. list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
  1219. if (!nvmet_fc_tgt_a_get(assoc))
  1220. continue;
  1221. if (!queue_work(nvmet_wq, &assoc->del_work))
  1222. /* already deleting - release local reference */
  1223. nvmet_fc_tgt_a_put(assoc);
  1224. }
  1225. rcu_read_unlock();
  1226. }
  1227. /**
  1228. * nvmet_fc_invalidate_host - transport entry point called by an LLDD
  1229. * to remove references to a hosthandle for LS's.
  1230. *
  1231. * The nvmet-fc layer ensures that any references to the hosthandle
  1232. * on the targetport are forgotten (set to NULL). The LLDD will
  1233. * typically call this when a login with a remote host port has been
  1234. * lost, thus LS's for the remote host port are no longer possible.
  1235. *
  1236. * If an LS request is outstanding to the targetport/hosthandle (or
  1237. * issued concurrently with the call to invalidate the host), the
  1238. * LLDD is responsible for terminating/aborting the LS and completing
  1239. * the LS request. It is recommended that these terminations/aborts
  1240. * occur after calling to invalidate the host handle to avoid additional
  1241. * retries by the nvmet-fc transport. The nvmet-fc transport may
  1242. * continue to reference host handle while it cleans up outstanding
  1243. * NVME associations. The nvmet-fc transport will call the
  1244. * ops->host_release() callback to notify the LLDD that all references
  1245. * are complete and the related host handle can be recovered.
  1246. * Note: if there are no references, the callback may be called before
  1247. * the invalidate host call returns.
  1248. *
  1249. * @target_port: pointer to the (registered) target port that a prior
  1250. * LS was received on and which supplied the transport the
  1251. * hosthandle.
  1252. * @hosthandle: the handle (pointer) that represents the host port
  1253. * that no longer has connectivity and that LS's should
  1254. * no longer be directed to.
  1255. */
  1256. void
  1257. nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
  1258. void *hosthandle)
  1259. {
  1260. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  1261. struct nvmet_fc_tgt_assoc *assoc, *next;
  1262. unsigned long flags;
  1263. bool noassoc = true;
  1264. spin_lock_irqsave(&tgtport->lock, flags);
  1265. list_for_each_entry_safe(assoc, next,
  1266. &tgtport->assoc_list, a_list) {
  1267. if (!assoc->hostport ||
  1268. assoc->hostport->hosthandle != hosthandle)
  1269. continue;
  1270. if (!nvmet_fc_tgt_a_get(assoc))
  1271. continue;
  1272. assoc->hostport->invalid = 1;
  1273. noassoc = false;
  1274. if (!queue_work(nvmet_wq, &assoc->del_work))
  1275. /* already deleting - release local reference */
  1276. nvmet_fc_tgt_a_put(assoc);
  1277. }
  1278. spin_unlock_irqrestore(&tgtport->lock, flags);
  1279. /* if there's nothing to wait for - call the callback */
  1280. if (noassoc && tgtport->ops->host_release)
  1281. tgtport->ops->host_release(hosthandle);
  1282. }
  1283. EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
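/*
 * Illustrative sketch only - hypothetical LLDD code showing a possible
 * spot to call nvmet_fc_invalidate_host() once the login with a remote
 * host port is lost. No LLDD-specific helpers are assumed; the
 * host_release() hand-shake is as documented above.
 */
static void
example_lldd_remote_host_gone(struct nvmet_fc_target_port *targetport,
			      void *hosthandle)
{
	/*
	 * Ask nvmet-fc to forget this hosthandle; any associations from
	 * that host are scheduled for deletion.
	 */
	nvmet_fc_invalidate_host(targetport, hosthandle);

	/*
	 * The LLDD should now terminate/abort any LS it still has
	 * outstanding for this hosthandle. Once nvmet-fc holds no more
	 * references it calls ops->host_release(hosthandle) - possibly
	 * before nvmet_fc_invalidate_host() has even returned - and only
	 * then may the LLDD free whatever backs the handle.
	 */
}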
  1284. /*
  1285. * nvmet layer has called to terminate an association
  1286. */
  1287. static void
  1288. nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
  1289. {
  1290. struct nvmet_fc_tgtport *tgtport, *next;
  1291. struct nvmet_fc_tgt_assoc *assoc;
  1292. struct nvmet_fc_tgt_queue *queue;
  1293. unsigned long flags;
  1294. bool found_ctrl = false;
  1295. /* this is a bit ugly, but don't want to make locks layered */
  1296. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1297. list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
  1298. tgt_list) {
  1299. if (!nvmet_fc_tgtport_get(tgtport))
  1300. continue;
  1301. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1302. rcu_read_lock();
  1303. list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
  1304. queue = rcu_dereference(assoc->queues[0]);
  1305. if (queue && queue->nvme_sq.ctrl == ctrl) {
  1306. if (nvmet_fc_tgt_a_get(assoc))
  1307. found_ctrl = true;
  1308. break;
  1309. }
  1310. }
  1311. rcu_read_unlock();
  1312. nvmet_fc_tgtport_put(tgtport);
  1313. if (found_ctrl) {
  1314. if (!queue_work(nvmet_wq, &assoc->del_work))
  1315. /* already deleting - release local reference */
  1316. nvmet_fc_tgt_a_put(assoc);
  1317. return;
  1318. }
  1319. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1320. }
  1321. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1322. }
  1323. /**
  1324. * nvmet_fc_unregister_targetport - transport entry point called by an
  1325. * LLDD to deregister/remove a previously
1326. * registered local NVME subsystem FC port.
  1327. * @target_port: pointer to the (registered) target port that is to be
  1328. * deregistered.
  1329. *
  1330. * Returns:
  1331. * a completion status. Must be 0 upon success; a negative errno
  1332. * (ex: -ENXIO) upon failure.
  1333. */
  1334. int
  1335. nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
  1336. {
  1337. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  1338. nvmet_fc_portentry_unbind_tgt(tgtport);
  1339. /* terminate any outstanding associations */
  1340. __nvmet_fc_free_assocs(tgtport);
  1341. /*
  1342. * should terminate LS's as well. However, LS's will be generated
  1343. * at the tail end of association termination, so they likely don't
  1344. * exist yet. And even if they did, it's worthwhile to just let
  1345. * them finish and targetport ref counting will clean things up.
  1346. */
  1347. nvmet_fc_tgtport_put(tgtport);
  1348. return 0;
  1349. }
  1350. EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
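/*
 * Illustrative sketch only - hypothetical LLDD teardown code. The
 * completion used to wait for the final targetport_delete() callback is
 * an assumption about how an LLDD might structure its shutdown; the
 * entry point is the one above.
 */
static void
example_lldd_unregister_nvmet_port(struct nvmet_fc_target_port *targetport,
				   struct completion *delete_done)
{
	int ret;

	ret = nvmet_fc_unregister_targetport(targetport);
	if (ret)
		pr_err("nvmet-fc targetport unregister failed: %d\n", ret);

	/*
	 * Associations and outstanding LS/FCP work are torn down
	 * asynchronously; the LLDD's targetport_delete() handler (assumed
	 * to complete(delete_done)) fires once the last reference drops,
	 * and only then is it safe to free what backs the registration.
	 */
	wait_for_completion(delete_done);
}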
  1351. /* ********************** FC-NVME LS RCV Handling ************************* */
  1352. static void
  1353. nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
  1354. struct nvmet_fc_ls_iod *iod)
  1355. {
  1356. struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
  1357. struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
  1358. struct nvmet_fc_tgt_queue *queue;
  1359. int ret = 0;
  1360. memset(acc, 0, sizeof(*acc));
  1361. /*
1362. * FC-NVME spec changes: initiators have been seen sending different
1363. * lengths because the padding size for the Create Association Cmd
1364. * descriptor was incorrect.
  1365. * Accept anything of "minimum" length. Assume format per 1.15
  1366. * spec (with HOSTID reduced to 16 bytes), ignore how long the
  1367. * trailing pad length is.
  1368. */
  1369. if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
  1370. ret = VERR_CR_ASSOC_LEN;
  1371. else if (be32_to_cpu(rqst->desc_list_len) <
  1372. FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
  1373. ret = VERR_CR_ASSOC_RQST_LEN;
  1374. else if (rqst->assoc_cmd.desc_tag !=
  1375. cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
  1376. ret = VERR_CR_ASSOC_CMD;
  1377. else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
  1378. FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
  1379. ret = VERR_CR_ASSOC_CMD_LEN;
  1380. else if (!rqst->assoc_cmd.ersp_ratio ||
  1381. (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
  1382. be16_to_cpu(rqst->assoc_cmd.sqsize)))
  1383. ret = VERR_ERSP_RATIO;
  1384. else {
  1385. /* new association w/ admin queue */
  1386. iod->assoc = nvmet_fc_alloc_target_assoc(
  1387. tgtport, iod->hosthandle);
  1388. if (!iod->assoc)
  1389. ret = VERR_ASSOC_ALLOC_FAIL;
  1390. else {
  1391. queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
  1392. be16_to_cpu(rqst->assoc_cmd.sqsize));
  1393. if (!queue) {
  1394. ret = VERR_QUEUE_ALLOC_FAIL;
  1395. nvmet_fc_tgt_a_put(iod->assoc);
  1396. }
  1397. }
  1398. }
  1399. if (ret) {
  1400. dev_err(tgtport->dev,
  1401. "Create Association LS failed: %s\n",
  1402. validation_errors[ret]);
  1403. iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
  1404. sizeof(*acc), rqst->w0.ls_cmd,
  1405. FCNVME_RJT_RC_LOGIC,
  1406. FCNVME_RJT_EXP_NONE, 0);
  1407. return;
  1408. }
  1409. queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
  1410. atomic_set(&queue->connected, 1);
  1411. queue->sqhd = 0; /* best place to init value */
  1412. dev_info(tgtport->dev,
  1413. "{%d:%d} Association created\n",
  1414. tgtport->fc_target_port.port_num, iod->assoc->a_id);
  1415. /* format a response */
  1416. iod->lsrsp->rsplen = sizeof(*acc);
  1417. nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
  1418. fcnvme_lsdesc_len(
  1419. sizeof(struct fcnvme_ls_cr_assoc_acc)),
  1420. FCNVME_LS_CREATE_ASSOCIATION);
  1421. acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
  1422. acc->associd.desc_len =
  1423. fcnvme_lsdesc_len(
  1424. sizeof(struct fcnvme_lsdesc_assoc_id));
  1425. acc->associd.association_id =
  1426. cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
  1427. acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
  1428. acc->connectid.desc_len =
  1429. fcnvme_lsdesc_len(
  1430. sizeof(struct fcnvme_lsdesc_conn_id));
  1431. acc->connectid.connection_id = acc->associd.association_id;
  1432. }
  1433. static void
  1434. nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
  1435. struct nvmet_fc_ls_iod *iod)
  1436. {
  1437. struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
  1438. struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
  1439. struct nvmet_fc_tgt_queue *queue;
  1440. int ret = 0;
  1441. memset(acc, 0, sizeof(*acc));
  1442. if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
  1443. ret = VERR_CR_CONN_LEN;
  1444. else if (rqst->desc_list_len !=
  1445. fcnvme_lsdesc_len(
  1446. sizeof(struct fcnvme_ls_cr_conn_rqst)))
  1447. ret = VERR_CR_CONN_RQST_LEN;
  1448. else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
  1449. ret = VERR_ASSOC_ID;
  1450. else if (rqst->associd.desc_len !=
  1451. fcnvme_lsdesc_len(
  1452. sizeof(struct fcnvme_lsdesc_assoc_id)))
  1453. ret = VERR_ASSOC_ID_LEN;
  1454. else if (rqst->connect_cmd.desc_tag !=
  1455. cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
  1456. ret = VERR_CR_CONN_CMD;
  1457. else if (rqst->connect_cmd.desc_len !=
  1458. fcnvme_lsdesc_len(
  1459. sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
  1460. ret = VERR_CR_CONN_CMD_LEN;
  1461. else if (!rqst->connect_cmd.ersp_ratio ||
  1462. (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
  1463. be16_to_cpu(rqst->connect_cmd.sqsize)))
  1464. ret = VERR_ERSP_RATIO;
  1465. else {
  1466. /* new io queue */
  1467. iod->assoc = nvmet_fc_find_target_assoc(tgtport,
  1468. be64_to_cpu(rqst->associd.association_id));
  1469. if (!iod->assoc)
  1470. ret = VERR_NO_ASSOC;
  1471. else {
  1472. queue = nvmet_fc_alloc_target_queue(iod->assoc,
  1473. be16_to_cpu(rqst->connect_cmd.qid),
  1474. be16_to_cpu(rqst->connect_cmd.sqsize));
  1475. if (!queue)
  1476. ret = VERR_QUEUE_ALLOC_FAIL;
  1477. /* release get taken in nvmet_fc_find_target_assoc */
  1478. nvmet_fc_tgt_a_put(iod->assoc);
  1479. }
  1480. }
  1481. if (ret) {
  1482. dev_err(tgtport->dev,
  1483. "Create Connection LS failed: %s\n",
  1484. validation_errors[ret]);
  1485. iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
  1486. sizeof(*acc), rqst->w0.ls_cmd,
  1487. (ret == VERR_NO_ASSOC) ?
  1488. FCNVME_RJT_RC_INV_ASSOC :
  1489. FCNVME_RJT_RC_LOGIC,
  1490. FCNVME_RJT_EXP_NONE, 0);
  1491. return;
  1492. }
  1493. queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
  1494. atomic_set(&queue->connected, 1);
  1495. queue->sqhd = 0; /* best place to init value */
  1496. /* format a response */
  1497. iod->lsrsp->rsplen = sizeof(*acc);
  1498. nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
  1499. fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
  1500. FCNVME_LS_CREATE_CONNECTION);
  1501. acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
  1502. acc->connectid.desc_len =
  1503. fcnvme_lsdesc_len(
  1504. sizeof(struct fcnvme_lsdesc_conn_id));
  1505. acc->connectid.connection_id =
  1506. cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
  1507. be16_to_cpu(rqst->connect_cmd.qid)));
  1508. }
  1509. /*
1510. * Returns true if the LS response is to be transmitted
  1511. * Returns false if the LS response is to be delayed
  1512. */
  1513. static int
  1514. nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
  1515. struct nvmet_fc_ls_iod *iod)
  1516. {
  1517. struct fcnvme_ls_disconnect_assoc_rqst *rqst =
  1518. &iod->rqstbuf->rq_dis_assoc;
  1519. struct fcnvme_ls_disconnect_assoc_acc *acc =
  1520. &iod->rspbuf->rsp_dis_assoc;
  1521. struct nvmet_fc_tgt_assoc *assoc = NULL;
  1522. struct nvmet_fc_ls_iod *oldls = NULL;
  1523. unsigned long flags;
  1524. int ret = 0;
  1525. memset(acc, 0, sizeof(*acc));
  1526. ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
  1527. if (!ret) {
  1528. /* match an active association - takes an assoc ref if !NULL */
  1529. assoc = nvmet_fc_find_target_assoc(tgtport,
  1530. be64_to_cpu(rqst->associd.association_id));
  1531. iod->assoc = assoc;
  1532. if (!assoc)
  1533. ret = VERR_NO_ASSOC;
  1534. }
  1535. if (ret || !assoc) {
  1536. dev_err(tgtport->dev,
  1537. "Disconnect LS failed: %s\n",
  1538. validation_errors[ret]);
  1539. iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
  1540. sizeof(*acc), rqst->w0.ls_cmd,
  1541. (ret == VERR_NO_ASSOC) ?
  1542. FCNVME_RJT_RC_INV_ASSOC :
  1543. FCNVME_RJT_RC_LOGIC,
  1544. FCNVME_RJT_EXP_NONE, 0);
  1545. return true;
  1546. }
  1547. /* format a response */
  1548. iod->lsrsp->rsplen = sizeof(*acc);
  1549. nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
  1550. fcnvme_lsdesc_len(
  1551. sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
  1552. FCNVME_LS_DISCONNECT_ASSOC);
  1553. /* release get taken in nvmet_fc_find_target_assoc */
  1554. nvmet_fc_tgt_a_put(assoc);
  1555. /*
1556. * The rules for LS responses say the response cannot
  1557. * go back until ABTS's have been sent for all outstanding
  1558. * I/O and a Disconnect Association LS has been sent.
  1559. * So... save off the Disconnect LS to send the response
  1560. * later. If there was a prior LS already saved, replace
  1561. * it with the newer one and send a can't perform reject
  1562. * on the older one.
  1563. */
  1564. spin_lock_irqsave(&tgtport->lock, flags);
  1565. oldls = assoc->rcv_disconn;
  1566. assoc->rcv_disconn = iod;
  1567. spin_unlock_irqrestore(&tgtport->lock, flags);
  1568. nvmet_fc_delete_target_assoc(assoc);
  1569. if (oldls) {
  1570. dev_info(tgtport->dev,
  1571. "{%d:%d} Multiple Disconnect Association LS's "
  1572. "received\n",
  1573. tgtport->fc_target_port.port_num, assoc->a_id);
  1574. /* overwrite good response with bogus failure */
  1575. oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
  1576. sizeof(*iod->rspbuf),
  1577. /* ok to use rqst, LS is same */
  1578. rqst->w0.ls_cmd,
  1579. FCNVME_RJT_RC_UNAB,
  1580. FCNVME_RJT_EXP_NONE, 0);
  1581. nvmet_fc_xmt_ls_rsp(tgtport, oldls);
  1582. }
  1583. return false;
  1584. }
  1585. /* *********************** NVME Ctrl Routines **************************** */
  1586. static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
  1587. static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
  1588. static void
  1589. nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
  1590. {
  1591. struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
  1592. struct nvmet_fc_tgtport *tgtport = iod->tgtport;
  1593. fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
  1594. sizeof(*iod->rspbuf), DMA_TO_DEVICE);
  1595. nvmet_fc_free_ls_iod(tgtport, iod);
  1596. nvmet_fc_tgtport_put(tgtport);
  1597. }
  1598. static void
  1599. nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
  1600. struct nvmet_fc_ls_iod *iod)
  1601. {
  1602. int ret;
  1603. fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
  1604. sizeof(*iod->rspbuf), DMA_TO_DEVICE);
  1605. ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
  1606. if (ret)
  1607. nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
  1608. }
  1609. /*
  1610. * Actual processing routine for received FC-NVME LS Requests from the LLD
  1611. */
  1612. static void
  1613. nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
  1614. struct nvmet_fc_ls_iod *iod)
  1615. {
  1616. struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
  1617. bool sendrsp = true;
  1618. iod->lsrsp->nvme_fc_private = iod;
  1619. iod->lsrsp->rspbuf = iod->rspbuf;
  1620. iod->lsrsp->rspdma = iod->rspdma;
  1621. iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1622. /* Be preventative: handlers will later set rsplen to a valid length */
  1623. iod->lsrsp->rsplen = 0;
  1624. iod->assoc = NULL;
  1625. /*
  1626. * handlers:
  1627. * parse request input, execute the request, and format the
  1628. * LS response
  1629. */
  1630. switch (w0->ls_cmd) {
  1631. case FCNVME_LS_CREATE_ASSOCIATION:
  1632. /* Creates Association and initial Admin Queue/Connection */
  1633. nvmet_fc_ls_create_association(tgtport, iod);
  1634. break;
  1635. case FCNVME_LS_CREATE_CONNECTION:
  1636. /* Creates an IO Queue/Connection */
  1637. nvmet_fc_ls_create_connection(tgtport, iod);
  1638. break;
  1639. case FCNVME_LS_DISCONNECT_ASSOC:
  1640. /* Terminate a Queue/Connection or the Association */
  1641. sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
  1642. break;
  1643. default:
  1644. iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
  1645. sizeof(*iod->rspbuf), w0->ls_cmd,
  1646. FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
  1647. }
  1648. if (sendrsp)
  1649. nvmet_fc_xmt_ls_rsp(tgtport, iod);
  1650. }
  1651. /*
  1652. * Actual processing routine for received FC-NVME LS Requests from the LLD
  1653. */
  1654. static void
  1655. nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
  1656. {
  1657. struct nvmet_fc_ls_iod *iod =
  1658. container_of(work, struct nvmet_fc_ls_iod, work);
  1659. struct nvmet_fc_tgtport *tgtport = iod->tgtport;
  1660. nvmet_fc_handle_ls_rqst(tgtport, iod);
  1661. }
  1662. /**
  1663. * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
  1664. * upon the reception of a NVME LS request.
  1665. *
  1666. * The nvmet-fc layer will copy payload to an internal structure for
  1667. * processing. As such, upon completion of the routine, the LLDD may
  1668. * immediately free/reuse the LS request buffer passed in the call.
  1669. *
  1670. * If this routine returns error, the LLDD should abort the exchange.
  1671. *
  1672. * @target_port: pointer to the (registered) target port the LS was
  1673. * received on.
  1674. * @hosthandle: pointer to the host specific data, gets stored in iod.
  1675. * @lsrsp: pointer to a lsrsp structure to be used to reference
  1676. * the exchange corresponding to the LS.
  1677. * @lsreqbuf: pointer to the buffer containing the LS Request
  1678. * @lsreqbuf_len: length, in bytes, of the received LS request
  1679. */
  1680. int
  1681. nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
  1682. void *hosthandle,
  1683. struct nvmefc_ls_rsp *lsrsp,
  1684. void *lsreqbuf, u32 lsreqbuf_len)
  1685. {
  1686. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  1687. struct nvmet_fc_ls_iod *iod;
  1688. struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
  1689. if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
  1690. dev_info(tgtport->dev,
  1691. "RCV %s LS failed: payload too large (%d)\n",
  1692. (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
  1693. nvmefc_ls_names[w0->ls_cmd] : "",
  1694. lsreqbuf_len);
  1695. return -E2BIG;
  1696. }
  1697. if (!nvmet_fc_tgtport_get(tgtport)) {
  1698. dev_info(tgtport->dev,
  1699. "RCV %s LS failed: target deleting\n",
  1700. (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
  1701. nvmefc_ls_names[w0->ls_cmd] : "");
  1702. return -ESHUTDOWN;
  1703. }
  1704. iod = nvmet_fc_alloc_ls_iod(tgtport);
  1705. if (!iod) {
  1706. dev_info(tgtport->dev,
  1707. "RCV %s LS failed: context allocation failed\n",
  1708. (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
  1709. nvmefc_ls_names[w0->ls_cmd] : "");
  1710. nvmet_fc_tgtport_put(tgtport);
  1711. return -ENOENT;
  1712. }
  1713. iod->lsrsp = lsrsp;
  1714. iod->fcpreq = NULL;
  1715. memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
  1716. iod->rqstdatalen = lsreqbuf_len;
  1717. iod->hosthandle = hosthandle;
  1718. queue_work(nvmet_wq, &iod->work);
  1719. return 0;
  1720. }
  1721. EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
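/*
 * Illustrative sketch only - hypothetical LLDD code handing a received
 * NVME LS frame to nvmet-fc. example_lldd_abort_ls_exchange() is a
 * hypothetical helper; the call and its error semantics follow the
 * kernel-doc above.
 */
static void
example_lldd_recv_nvme_ls(struct nvmet_fc_target_port *targetport,
			  void *hosthandle, struct nvmefc_ls_rsp *lsrsp,
			  void *lsbuf, u32 lslen)
{
	int ret;

	ret = nvmet_fc_rcv_ls_req(targetport, hosthandle, lsrsp,
				  lsbuf, lslen);
	if (ret) {
		/*
		 * nvmet-fc did not take the LS (payload too large, target
		 * port deleting, or no iod available); the LLDD should
		 * abort the LS exchange.
		 */
		example_lldd_abort_ls_exchange(lsrsp);	/* hypothetical */
		return;
	}

	/*
	 * On success the payload has been copied internally, so the
	 * receive buffer may be reused immediately; the LS response is
	 * sent later via ops->xmt_ls_rsp() against this lsrsp.
	 */
}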
  1722. /*
  1723. * **********************
  1724. * Start of FCP handling
  1725. * **********************
  1726. */
  1727. static int
  1728. nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
  1729. {
  1730. struct scatterlist *sg;
  1731. unsigned int nent;
  1732. sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
  1733. if (!sg)
  1734. goto out;
  1735. fod->data_sg = sg;
  1736. fod->data_sg_cnt = nent;
  1737. fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
  1738. ((fod->io_dir == NVMET_FCP_WRITE) ?
  1739. DMA_FROM_DEVICE : DMA_TO_DEVICE));
  1740. /* note: write from initiator perspective */
  1741. fod->next_sg = fod->data_sg;
  1742. return 0;
  1743. out:
  1744. return NVME_SC_INTERNAL;
  1745. }
  1746. static void
  1747. nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
  1748. {
  1749. if (!fod->data_sg || !fod->data_sg_cnt)
  1750. return;
  1751. fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
  1752. ((fod->io_dir == NVMET_FCP_WRITE) ?
  1753. DMA_FROM_DEVICE : DMA_TO_DEVICE));
  1754. sgl_free(fod->data_sg);
  1755. fod->data_sg = NULL;
  1756. fod->data_sg_cnt = 0;
  1757. }
  1758. static bool
  1759. queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
  1760. {
  1761. u32 sqtail, used;
  1762. /* egad, this is ugly. And sqtail is just a best guess */
  1763. sqtail = atomic_read(&q->sqtail) % q->sqsize;
  1764. used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
  1765. return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
  1766. }
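/*
 * Worked example (illustrative) for the check above: with sqsize = 32,
 * sqhd = 4 and a sqtail guess of 1, used = 1 + 32 - 4 = 29, and
 * 29 * 10 = 290 >= (32 - 1) * 9 = 279, so the queue is treated as 90%+
 * full; with sqtail = 10 instead, used = 6 and 60 < 279, so it is not.
 */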
  1767. /*
  1768. * Prep RSP payload.
  1769. * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
  1770. */
  1771. static void
  1772. nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
  1773. struct nvmet_fc_fcp_iod *fod)
  1774. {
  1775. struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
  1776. struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
  1777. struct nvme_completion *cqe = &ersp->cqe;
  1778. u32 *cqewd = (u32 *)cqe;
  1779. bool send_ersp = false;
  1780. u32 rsn, rspcnt, xfr_length;
  1781. if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
  1782. xfr_length = fod->req.transfer_len;
  1783. else
  1784. xfr_length = fod->offset;
  1785. /*
  1786. * check to see if we can send a 0's rsp.
  1787. * Note: to send a 0's response, the NVME-FC host transport will
  1788. * recreate the CQE. The host transport knows: sq id, SQHD (last
  1789. * seen in an ersp), and command_id. Thus it will create a
  1790. * zero-filled CQE with those known fields filled in. Transport
  1791. * must send an ersp for any condition where the cqe won't match
  1792. * this.
  1793. *
  1794. * Here are the FC-NVME mandated cases where we must send an ersp:
1795. * - every N responses, where N=ersp_ratio
1796. * - fabric commands are forced to send ersp's (not in FC-NVME but
1797. * good practice)
1798. * - normal cmds: any time status is non-zero, or status is zero
1799. * but words 0 or 1 are non-zero
1800. * - the SQ is 90% or more full
1801. * - the cmd is a fused command
1802. * - transferred data length not equal to cmd iu length
  1803. */
  1804. rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
  1805. if (!(rspcnt % fod->queue->ersp_ratio) ||
  1806. nvme_is_fabrics((struct nvme_command *) sqe) ||
  1807. xfr_length != fod->req.transfer_len ||
  1808. (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
  1809. (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
  1810. queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
  1811. send_ersp = true;
  1812. /* re-set the fields */
  1813. fod->fcpreq->rspaddr = ersp;
  1814. fod->fcpreq->rspdma = fod->rspdma;
  1815. if (!send_ersp) {
  1816. memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
  1817. fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
  1818. } else {
  1819. ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
  1820. rsn = atomic_inc_return(&fod->queue->rsn);
  1821. ersp->rsn = cpu_to_be32(rsn);
  1822. ersp->xfrd_len = cpu_to_be32(xfr_length);
  1823. fod->fcpreq->rsplen = sizeof(*ersp);
  1824. }
  1825. fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
  1826. sizeof(fod->rspiubuf), DMA_TO_DEVICE);
  1827. }
  1828. static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
  1829. static void
  1830. nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
  1831. struct nvmet_fc_fcp_iod *fod)
  1832. {
  1833. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1834. /* data no longer needed */
  1835. nvmet_fc_free_tgt_pgs(fod);
  1836. /*
  1837. * if an ABTS was received or we issued the fcp_abort early
  1838. * don't call abort routine again.
  1839. */
  1840. /* no need to take lock - lock was taken earlier to get here */
  1841. if (!fod->aborted)
  1842. tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
  1843. nvmet_fc_free_fcp_iod(fod->queue, fod);
  1844. }
  1845. static void
  1846. nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
  1847. struct nvmet_fc_fcp_iod *fod)
  1848. {
  1849. int ret;
  1850. fod->fcpreq->op = NVMET_FCOP_RSP;
  1851. fod->fcpreq->timeout = 0;
  1852. nvmet_fc_prep_fcp_rsp(tgtport, fod);
  1853. ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
  1854. if (ret)
  1855. nvmet_fc_abort_op(tgtport, fod);
  1856. }
  1857. static void
  1858. nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
  1859. struct nvmet_fc_fcp_iod *fod, u8 op)
  1860. {
  1861. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1862. struct scatterlist *sg = fod->next_sg;
  1863. unsigned long flags;
  1864. u32 remaininglen = fod->req.transfer_len - fod->offset;
  1865. u32 tlen = 0;
  1866. int ret;
  1867. fcpreq->op = op;
  1868. fcpreq->offset = fod->offset;
  1869. fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
  1870. /*
  1871. * for next sequence:
1872. * - break at a sg element boundary
1873. * - attempt to keep the sequence length capped at
1874. * NVMET_FC_MAX_SEQ_LENGTH, but allow the sequence to
1875. * be longer if a single sg element is larger than that
1876. * amount. This is done to avoid creating a new sg list
1877. * to use for the tgtport api.
  1878. */
  1879. fcpreq->sg = sg;
  1880. fcpreq->sg_cnt = 0;
  1881. while (tlen < remaininglen &&
  1882. fcpreq->sg_cnt < tgtport->max_sg_cnt &&
  1883. tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
  1884. fcpreq->sg_cnt++;
  1885. tlen += sg_dma_len(sg);
  1886. sg = sg_next(sg);
  1887. }
  1888. if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
  1889. fcpreq->sg_cnt++;
  1890. tlen += min_t(u32, sg_dma_len(sg), remaininglen);
  1891. sg = sg_next(sg);
  1892. }
  1893. if (tlen < remaininglen)
  1894. fod->next_sg = sg;
  1895. else
  1896. fod->next_sg = NULL;
  1897. fcpreq->transfer_length = tlen;
  1898. fcpreq->transferred_length = 0;
  1899. fcpreq->fcp_error = 0;
  1900. fcpreq->rsplen = 0;
  1901. /*
  1902. * If the last READDATA request: check if LLDD supports
  1903. * combined xfr with response.
  1904. */
  1905. if ((op == NVMET_FCOP_READDATA) &&
  1906. ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
  1907. (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
  1908. fcpreq->op = NVMET_FCOP_READDATA_RSP;
  1909. nvmet_fc_prep_fcp_rsp(tgtport, fod);
  1910. }
  1911. ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
  1912. if (ret) {
  1913. /*
  1914. * should be ok to set w/o lock as its in the thread of
  1915. * execution (not an async timer routine) and doesn't
  1916. * contend with any clearing action
  1917. */
  1918. fod->abort = true;
  1919. if (op == NVMET_FCOP_WRITEDATA) {
  1920. spin_lock_irqsave(&fod->flock, flags);
  1921. fod->writedataactive = false;
  1922. spin_unlock_irqrestore(&fod->flock, flags);
  1923. nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
  1924. } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
  1925. fcpreq->fcp_error = ret;
  1926. fcpreq->transferred_length = 0;
  1927. nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
  1928. }
  1929. }
  1930. }
  1931. static inline bool
  1932. __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
  1933. {
  1934. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1935. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  1936. /* if in the middle of an io and we need to tear down */
  1937. if (abort) {
  1938. if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
  1939. nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
  1940. return true;
  1941. }
  1942. nvmet_fc_abort_op(tgtport, fod);
  1943. return true;
  1944. }
  1945. return false;
  1946. }
  1947. /*
  1948. * actual done handler for FCP operations when completed by the lldd
  1949. */
  1950. static void
  1951. nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
  1952. {
  1953. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1954. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  1955. unsigned long flags;
  1956. bool abort;
  1957. spin_lock_irqsave(&fod->flock, flags);
  1958. abort = fod->abort;
  1959. fod->writedataactive = false;
  1960. spin_unlock_irqrestore(&fod->flock, flags);
  1961. switch (fcpreq->op) {
  1962. case NVMET_FCOP_WRITEDATA:
  1963. if (__nvmet_fc_fod_op_abort(fod, abort))
  1964. return;
  1965. if (fcpreq->fcp_error ||
  1966. fcpreq->transferred_length != fcpreq->transfer_length) {
  1967. spin_lock_irqsave(&fod->flock, flags);
  1968. fod->abort = true;
  1969. spin_unlock_irqrestore(&fod->flock, flags);
  1970. nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
  1971. return;
  1972. }
  1973. fod->offset += fcpreq->transferred_length;
  1974. if (fod->offset != fod->req.transfer_len) {
  1975. spin_lock_irqsave(&fod->flock, flags);
  1976. fod->writedataactive = true;
  1977. spin_unlock_irqrestore(&fod->flock, flags);
  1978. /* transfer the next chunk */
  1979. nvmet_fc_transfer_fcp_data(tgtport, fod,
  1980. NVMET_FCOP_WRITEDATA);
  1981. return;
  1982. }
  1983. /* data transfer complete, resume with nvmet layer */
  1984. fod->req.execute(&fod->req);
  1985. break;
  1986. case NVMET_FCOP_READDATA:
  1987. case NVMET_FCOP_READDATA_RSP:
  1988. if (__nvmet_fc_fod_op_abort(fod, abort))
  1989. return;
  1990. if (fcpreq->fcp_error ||
  1991. fcpreq->transferred_length != fcpreq->transfer_length) {
  1992. nvmet_fc_abort_op(tgtport, fod);
  1993. return;
  1994. }
  1995. /* success */
  1996. if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
  1997. /* data no longer needed */
  1998. nvmet_fc_free_tgt_pgs(fod);
  1999. nvmet_fc_free_fcp_iod(fod->queue, fod);
  2000. return;
  2001. }
  2002. fod->offset += fcpreq->transferred_length;
  2003. if (fod->offset != fod->req.transfer_len) {
  2004. /* transfer the next chunk */
  2005. nvmet_fc_transfer_fcp_data(tgtport, fod,
  2006. NVMET_FCOP_READDATA);
  2007. return;
  2008. }
  2009. /* data transfer complete, send response */
  2010. /* data no longer needed */
  2011. nvmet_fc_free_tgt_pgs(fod);
  2012. nvmet_fc_xmt_fcp_rsp(tgtport, fod);
  2013. break;
  2014. case NVMET_FCOP_RSP:
  2015. if (__nvmet_fc_fod_op_abort(fod, abort))
  2016. return;
  2017. nvmet_fc_free_fcp_iod(fod->queue, fod);
  2018. break;
  2019. default:
  2020. break;
  2021. }
  2022. }
  2023. static void
  2024. nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
  2025. {
  2026. struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
  2027. nvmet_fc_fod_op_done(fod);
  2028. }
  2029. /*
  2030. * actual completion handler after execution by the nvmet layer
  2031. */
  2032. static void
  2033. __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
  2034. struct nvmet_fc_fcp_iod *fod, int status)
  2035. {
  2036. struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
  2037. struct nvme_completion *cqe = &fod->rspiubuf.cqe;
  2038. unsigned long flags;
  2039. bool abort;
  2040. spin_lock_irqsave(&fod->flock, flags);
  2041. abort = fod->abort;
  2042. spin_unlock_irqrestore(&fod->flock, flags);
  2043. /* if we have a CQE, snoop the last sq_head value */
  2044. if (!status)
  2045. fod->queue->sqhd = cqe->sq_head;
  2046. if (abort) {
  2047. nvmet_fc_abort_op(tgtport, fod);
  2048. return;
  2049. }
  2050. /* if an error handling the cmd post initial parsing */
  2051. if (status) {
  2052. /* fudge up a failed CQE status for our transport error */
  2053. memset(cqe, 0, sizeof(*cqe));
  2054. cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
  2055. cqe->sq_id = cpu_to_le16(fod->queue->qid);
  2056. cqe->command_id = sqe->command_id;
  2057. cqe->status = cpu_to_le16(status);
  2058. } else {
  2059. /*
  2060. * try to push the data even if the SQE status is non-zero.
  2061. * There may be a status where data still was intended to
  2062. * be moved
  2063. */
  2064. if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
  2065. /* push the data over before sending rsp */
  2066. nvmet_fc_transfer_fcp_data(tgtport, fod,
  2067. NVMET_FCOP_READDATA);
  2068. return;
  2069. }
  2070. /* writes & no data - fall thru */
  2071. }
  2072. /* data no longer needed */
  2073. nvmet_fc_free_tgt_pgs(fod);
  2074. nvmet_fc_xmt_fcp_rsp(tgtport, fod);
  2075. }
  2076. static void
  2077. nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
  2078. {
  2079. struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
  2080. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  2081. __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
  2082. }
  2083. /*
  2084. * Actual processing routine for received FC-NVME I/O Requests from the LLD
  2085. */
  2086. static void
  2087. nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
  2088. struct nvmet_fc_fcp_iod *fod)
  2089. {
  2090. struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
  2091. u32 xfrlen = be32_to_cpu(cmdiu->data_len);
  2092. int ret;
  2093. /*
  2094. * Fused commands are currently not supported in the linux
  2095. * implementation.
  2096. *
2097. * As such, the FC transport implementation does not examine
2098. * fused commands, nor does it hold and order their delivery to
2099. * the upper layer until both commands have arrived, based on csn.
  2100. */
  2101. fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
  2102. if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
  2103. fod->io_dir = NVMET_FCP_WRITE;
  2104. if (!nvme_is_write(&cmdiu->sqe))
  2105. goto transport_error;
  2106. } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
  2107. fod->io_dir = NVMET_FCP_READ;
  2108. if (nvme_is_write(&cmdiu->sqe))
  2109. goto transport_error;
  2110. } else {
  2111. fod->io_dir = NVMET_FCP_NODATA;
  2112. if (xfrlen)
  2113. goto transport_error;
  2114. }
  2115. fod->req.cmd = &fod->cmdiubuf.sqe;
  2116. fod->req.cqe = &fod->rspiubuf.cqe;
  2117. if (tgtport->pe)
  2118. fod->req.port = tgtport->pe->port;
  2119. /* clear any response payload */
  2120. memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
  2121. fod->data_sg = NULL;
  2122. fod->data_sg_cnt = 0;
  2123. ret = nvmet_req_init(&fod->req,
  2124. &fod->queue->nvme_cq,
  2125. &fod->queue->nvme_sq,
  2126. &nvmet_fc_tgt_fcp_ops);
  2127. if (!ret) {
  2128. /* bad SQE content or invalid ctrl state */
  2129. /* nvmet layer has already called op done to send rsp. */
  2130. return;
  2131. }
  2132. fod->req.transfer_len = xfrlen;
  2133. /* keep a running counter of tail position */
  2134. atomic_inc(&fod->queue->sqtail);
  2135. if (fod->req.transfer_len) {
  2136. ret = nvmet_fc_alloc_tgt_pgs(fod);
  2137. if (ret) {
  2138. nvmet_req_complete(&fod->req, ret);
  2139. return;
  2140. }
  2141. }
  2142. fod->req.sg = fod->data_sg;
  2143. fod->req.sg_cnt = fod->data_sg_cnt;
  2144. fod->offset = 0;
  2145. if (fod->io_dir == NVMET_FCP_WRITE) {
  2146. /* pull the data over before invoking nvmet layer */
  2147. nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
  2148. return;
  2149. }
  2150. /*
  2151. * Reads or no data:
  2152. *
  2153. * can invoke the nvmet_layer now. If read data, cmd completion will
  2154. * push the data
  2155. */
  2156. fod->req.execute(&fod->req);
  2157. return;
  2158. transport_error:
  2159. nvmet_fc_abort_op(tgtport, fod);
  2160. }
  2161. /**
  2162. * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
  2163. * upon the reception of a NVME FCP CMD IU.
  2164. *
  2165. * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
  2166. * layer for processing.
  2167. *
  2168. * The nvmet_fc layer allocates a local job structure (struct
  2169. * nvmet_fc_fcp_iod) from the queue for the io and copies the
  2170. * CMD IU buffer to the job structure. As such, on a successful
  2171. * completion (returns 0), the LLDD may immediately free/reuse
  2172. * the CMD IU buffer passed in the call.
  2173. *
2174. * However, in some circumstances, due to the packetized nature of FC
2175. * and the api of the FC LLDD - which may issue a hw command to send
2176. * the response yet not see the hw completion (and thus not upcall the
2177. * nvmet_fc layer) before a new command is asynchronously received -
2178. * it's possible for a command to be received before the LLDD and
2179. * nvmet_fc have recycled the job structure. This gives the appearance
2180. * of more commands received than fit in the sq.
  2181. * To alleviate this scenario, a temporary queue is maintained in the
  2182. * transport for pending LLDD requests waiting for a queue job structure.
2183. * In these "overrun" cases, a temporary queue element is allocated,
2184. * the LLDD request and CMD iu buffer information are remembered, and the
  2185. * routine returns a -EOVERFLOW status. Subsequently, when a queue job
  2186. * structure is freed, it is immediately reallocated for anything on the
  2187. * pending request list. The LLDDs defer_rcv() callback is called,
  2188. * informing the LLDD that it may reuse the CMD IU buffer, and the io
  2189. * is then started normally with the transport.
  2190. *
  2191. * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
  2192. * the completion as successful but must not reuse the CMD IU buffer
  2193. * until the LLDD's defer_rcv() callback has been called for the
  2194. * corresponding struct nvmefc_tgt_fcp_req pointer.
  2195. *
  2196. * If there is any other condition in which an error occurs, the
  2197. * transport will return a non-zero status indicating the error.
  2198. * In all cases other than -EOVERFLOW, the transport has not accepted the
  2199. * request and the LLDD should abort the exchange.
  2200. *
  2201. * @target_port: pointer to the (registered) target port the FCP CMD IU
  2202. * was received on.
  2203. * @fcpreq: pointer to a fcpreq request structure to be used to reference
  2204. * the exchange corresponding to the FCP Exchange.
  2205. * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
  2206. * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
  2207. */
  2208. int
  2209. nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
  2210. struct nvmefc_tgt_fcp_req *fcpreq,
  2211. void *cmdiubuf, u32 cmdiubuf_len)
  2212. {
  2213. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  2214. struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
  2215. struct nvmet_fc_tgt_queue *queue;
  2216. struct nvmet_fc_fcp_iod *fod;
  2217. struct nvmet_fc_defer_fcp_req *deferfcp;
  2218. unsigned long flags;
  2219. /* validate iu, so the connection id can be used to find the queue */
  2220. if ((cmdiubuf_len != sizeof(*cmdiu)) ||
  2221. (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
  2222. (cmdiu->fc_id != NVME_CMD_FC_ID) ||
  2223. (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
  2224. return -EIO;
  2225. queue = nvmet_fc_find_target_queue(tgtport,
  2226. be64_to_cpu(cmdiu->connection_id));
  2227. if (!queue)
  2228. return -ENOTCONN;
  2229. /*
  2230. * note: reference taken by find_target_queue
  2231. * After successful fod allocation, the fod will inherit the
  2232. * ownership of that reference and will remove the reference
  2233. * when the fod is freed.
  2234. */
  2235. spin_lock_irqsave(&queue->qlock, flags);
  2236. fod = nvmet_fc_alloc_fcp_iod(queue);
  2237. if (fod) {
  2238. spin_unlock_irqrestore(&queue->qlock, flags);
  2239. fcpreq->nvmet_fc_private = fod;
  2240. fod->fcpreq = fcpreq;
  2241. memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
  2242. nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
  2243. return 0;
  2244. }
  2245. if (!tgtport->ops->defer_rcv) {
  2246. spin_unlock_irqrestore(&queue->qlock, flags);
  2247. /* release the queue lookup reference */
  2248. nvmet_fc_tgt_q_put(queue);
  2249. return -ENOENT;
  2250. }
  2251. deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
  2252. struct nvmet_fc_defer_fcp_req, req_list);
  2253. if (deferfcp) {
  2254. /* Just re-use one that was previously allocated */
  2255. list_del(&deferfcp->req_list);
  2256. } else {
  2257. spin_unlock_irqrestore(&queue->qlock, flags);
  2258. /* Now we need to dynamically allocate one */
  2259. deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
  2260. if (!deferfcp) {
  2261. /* release the queue lookup reference */
  2262. nvmet_fc_tgt_q_put(queue);
  2263. return -ENOMEM;
  2264. }
  2265. spin_lock_irqsave(&queue->qlock, flags);
  2266. }
  2267. /* For now, use rspaddr / rsplen to save payload information */
  2268. fcpreq->rspaddr = cmdiubuf;
  2269. fcpreq->rsplen = cmdiubuf_len;
  2270. deferfcp->fcp_req = fcpreq;
  2271. /* defer processing till a fod becomes available */
  2272. list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
  2273. /* NOTE: the queue lookup reference is still valid */
  2274. spin_unlock_irqrestore(&queue->qlock, flags);
  2275. return -EOVERFLOW;
  2276. }
  2277. EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
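/*
 * Illustrative sketch only - hypothetical LLDD code handing a received
 * FCP CMD IU to nvmet-fc, including the -EOVERFLOW/defer_rcv() contract
 * described above. example_lldd_abort_exchange() is a hypothetical
 * helper.
 */
static void
example_lldd_recv_fcp_cmd(struct nvmet_fc_target_port *targetport,
			  struct nvmefc_tgt_fcp_req *tgt_fcp_req,
			  void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(targetport, tgt_fcp_req,
				   cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* accepted: the CMD IU was copied; buffer may be reused */
		break;
	case -EOVERFLOW:
		/*
		 * accepted but deferred: the CMD IU buffer must stay
		 * valid until ops->defer_rcv() is called for this
		 * tgt_fcp_req.
		 */
		break;
	default:
		/* not accepted - the LLDD should abort the FC exchange */
		example_lldd_abort_exchange(tgt_fcp_req); /* hypothetical */
		break;
	}
}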
  2278. /**
  2279. * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
  2280. * upon the reception of an ABTS for a FCP command
  2281. *
  2282. * Notify the transport that an ABTS has been received for a FCP command
  2283. * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
  2284. * LLDD believes the command is still being worked on
  2285. * (template_ops->fcp_req_release() has not been called).
  2286. *
  2287. * The transport will wait for any outstanding work (an op to the LLDD,
  2288. * which the lldd should complete with error due to the ABTS; or the
  2289. * completion from the nvmet layer of the nvme command), then will
2290. * stop processing and call the fcp_req_release() callback to
  2291. * return the i/o context to the LLDD. The LLDD may send the BA_ACC
  2292. * to the ABTS either after return from this function (assuming any
  2293. * outstanding op work has been terminated) or upon the callback being
  2294. * called.
  2295. *
  2296. * @target_port: pointer to the (registered) target port the FCP CMD IU
  2297. * was received on.
  2298. * @fcpreq: pointer to the fcpreq request structure that corresponds
  2299. * to the exchange that received the ABTS.
  2300. */
  2301. void
  2302. nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
  2303. struct nvmefc_tgt_fcp_req *fcpreq)
  2304. {
  2305. struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
  2306. struct nvmet_fc_tgt_queue *queue;
  2307. unsigned long flags;
  2308. if (!fod || fod->fcpreq != fcpreq)
  2309. /* job appears to have already completed, ignore abort */
  2310. return;
  2311. queue = fod->queue;
  2312. spin_lock_irqsave(&queue->qlock, flags);
  2313. if (fod->active) {
  2314. /*
  2315. * mark as abort. The abort handler, invoked upon completion
  2316. * of any work, will detect the aborted status and do the
  2317. * callback.
  2318. */
  2319. spin_lock(&fod->flock);
  2320. fod->abort = true;
  2321. fod->aborted = true;
  2322. spin_unlock(&fod->flock);
  2323. }
  2324. spin_unlock_irqrestore(&queue->qlock, flags);
  2325. }
  2326. EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
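/*
 * Illustrative sketch only - hypothetical LLDD ABTS handling for an FCP
 * exchange previously passed to nvmet_fc_rcv_fcp_req(). When the BA_ACC
 * is sent is LLDD-specific; per the kernel-doc above it may go out after
 * this call returns or from the fcp_req_release() callback.
 */
static void
example_lldd_recv_fcp_abts(struct nvmet_fc_target_port *targetport,
			   struct nvmefc_tgt_fcp_req *tgt_fcp_req)
{
	/*
	 * Flags the io as aborted; cleanup happens as outstanding work
	 * completes and the context is returned via fcp_req_release().
	 */
	nvmet_fc_rcv_fcp_abort(targetport, tgt_fcp_req);
}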
  2327. struct nvmet_fc_traddr {
  2328. u64 nn;
  2329. u64 pn;
  2330. };
  2331. static int
  2332. __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
  2333. {
  2334. u64 token64;
  2335. if (match_u64(sstr, &token64))
  2336. return -EINVAL;
  2337. *val = token64;
  2338. return 0;
  2339. }
  2340. /*
  2341. * This routine validates and extracts the WWN's from the TRADDR string.
2342. * As kernel parsers need the 0x prefix to determine the number base,
2343. * always build the string to parse with a 0x prefix before parsing the name strings.
  2344. */
  2345. static int
  2346. nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
  2347. {
  2348. char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
  2349. substring_t wwn = { name, &name[sizeof(name)-1] };
  2350. int nnoffset, pnoffset;
  2351. /* validate if string is one of the 2 allowed formats */
  2352. if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
  2353. !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
  2354. !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
  2355. "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
  2356. nnoffset = NVME_FC_TRADDR_OXNNLEN;
  2357. pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
  2358. NVME_FC_TRADDR_OXNNLEN;
  2359. } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
  2360. !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
  2361. !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
  2362. "pn-", NVME_FC_TRADDR_NNLEN))) {
  2363. nnoffset = NVME_FC_TRADDR_NNLEN;
  2364. pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
  2365. } else
  2366. goto out_einval;
  2367. name[0] = '0';
  2368. name[1] = 'x';
  2369. name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
  2370. memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
  2371. if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
  2372. goto out_einval;
  2373. memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
  2374. if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
  2375. goto out_einval;
  2376. return 0;
  2377. out_einval:
  2378. pr_warn("%s: bad traddr string\n", __func__);
  2379. return -EINVAL;
  2380. }
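/*
 * Example (illustrative values) of the two traddr layouts accepted
 * above, as a string would typically appear in an nvmet port's
 * addr_traddr configfs attribute:
 *
 *   nn-0x20000090fa942779:pn-0x10000090fa942779   (with 0x prefixes)
 *   nn-20000090fa942779:pn-10000090fa942779       (without 0x prefixes)
 *
 * Both parse to node_name 0x20000090fa942779 and port_name
 * 0x10000090fa942779; any other layout is rejected with -EINVAL.
 */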
  2381. static int
  2382. nvmet_fc_add_port(struct nvmet_port *port)
  2383. {
  2384. struct nvmet_fc_tgtport *tgtport;
  2385. struct nvmet_fc_port_entry *pe;
  2386. struct nvmet_fc_traddr traddr = { 0L, 0L };
  2387. unsigned long flags;
  2388. int ret;
  2389. /* validate the address info */
  2390. if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
  2391. (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
  2392. return -EINVAL;
  2393. /* map the traddr address info to a target port */
  2394. ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
  2395. sizeof(port->disc_addr.traddr));
  2396. if (ret)
  2397. return ret;
  2398. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  2399. if (!pe)
  2400. return -ENOMEM;
  2401. ret = -ENXIO;
  2402. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  2403. list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
  2404. if ((tgtport->fc_target_port.node_name == traddr.nn) &&
  2405. (tgtport->fc_target_port.port_name == traddr.pn)) {
2406. /* an FC port can map to only 1 nvmet port id */
  2407. if (!tgtport->pe) {
  2408. nvmet_fc_portentry_bind(tgtport, pe, port);
  2409. ret = 0;
  2410. } else
  2411. ret = -EALREADY;
  2412. break;
  2413. }
  2414. }
  2415. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  2416. if (ret)
  2417. kfree(pe);
  2418. return ret;
  2419. }
  2420. static void
  2421. nvmet_fc_remove_port(struct nvmet_port *port)
  2422. {
  2423. struct nvmet_fc_port_entry *pe = port->priv;
  2424. nvmet_fc_portentry_unbind(pe);
  2425. kfree(pe);
  2426. }
  2427. static void
  2428. nvmet_fc_discovery_chg(struct nvmet_port *port)
  2429. {
  2430. struct nvmet_fc_port_entry *pe = port->priv;
  2431. struct nvmet_fc_tgtport *tgtport = pe->tgtport;
  2432. if (tgtport && tgtport->ops->discovery_event)
  2433. tgtport->ops->discovery_event(&tgtport->fc_target_port);
  2434. }
  2435. static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
  2436. .owner = THIS_MODULE,
  2437. .type = NVMF_TRTYPE_FC,
  2438. .msdbd = 1,
  2439. .add_port = nvmet_fc_add_port,
  2440. .remove_port = nvmet_fc_remove_port,
  2441. .queue_response = nvmet_fc_fcp_nvme_cmd_done,
  2442. .delete_ctrl = nvmet_fc_delete_ctrl,
  2443. .discovery_chg = nvmet_fc_discovery_chg,
  2444. };
  2445. static int __init nvmet_fc_init_module(void)
  2446. {
  2447. return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
  2448. }
  2449. static void __exit nvmet_fc_exit_module(void)
  2450. {
2451. /* sanity check - all targetports should be removed */
  2452. if (!list_empty(&nvmet_fc_target_list))
  2453. pr_warn("%s: targetport list not empty\n", __func__);
  2454. nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
  2455. ida_destroy(&nvmet_fc_tgtport_cnt);
  2456. }
  2457. module_init(nvmet_fc_init_module);
  2458. module_exit(nvmet_fc_exit_module);
  2459. MODULE_LICENSE("GPL v2");