efct_lio.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
  4. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  5. */
  6. #include <target/target_core_base.h>
  7. #include <target/target_core_fabric.h>
  8. #include "efct_driver.h"
  9. #include "efct_lio.h"
  10. /*
 * lio_wq is used to call the LIO backend during creation or deletion of
  12. * sessions. This brings serialization to the session management as we create
  13. * single threaded work queue.
  14. */
  15. static struct workqueue_struct *lio_wq;
  16. static int
  17. efct_format_wwn(char *str, size_t len, const char *pre, u64 wwn)
  18. {
  19. u8 a[8];
  20. put_unaligned_be64(wwn, a);
  21. return snprintf(str, len, "%s%8phC", pre, a);
  22. }
  23. static int
  24. efct_lio_parse_wwn(const char *name, u64 *wwp, u8 npiv)
  25. {
  26. int num;
  27. u8 b[8];
  28. if (npiv) {
  29. num = sscanf(name,
  30. "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx",
  31. &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
  32. &b[7]);
  33. } else {
  34. num = sscanf(name,
  35. "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
  36. &b[0], &b[1], &b[2], &b[3], &b[4], &b[5], &b[6],
  37. &b[7]);
  38. }
  39. if (num != 8)
  40. return -EINVAL;
  41. *wwp = get_unaligned_be64(b);
  42. return 0;
  43. }
  44. static int
  45. efct_lio_parse_npiv_wwn(const char *name, size_t size, u64 *wwpn, u64 *wwnn)
  46. {
  47. unsigned int cnt = size;
  48. int rc;
  49. *wwpn = *wwnn = 0;
  50. if (name[cnt - 1] == '\n' || name[cnt - 1] == 0)
  51. cnt--;
  52. /* validate we have enough characters for WWPN */
  53. if ((cnt != (16 + 1 + 16)) || (name[16] != ':'))
  54. return -EINVAL;
  55. rc = efct_lio_parse_wwn(&name[0], wwpn, 1);
  56. if (rc)
  57. return rc;
  58. rc = efct_lio_parse_wwn(&name[17], wwnn, 1);
  59. if (rc)
  60. return rc;
  61. return 0;
  62. }
  63. static ssize_t
  64. efct_lio_tpg_enable_show(struct config_item *item, char *page)
  65. {
  66. struct se_portal_group *se_tpg = to_tpg(item);
  67. struct efct_lio_tpg *tpg =
  68. container_of(se_tpg, struct efct_lio_tpg, tpg);
  69. return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
  70. }
  71. static ssize_t
  72. efct_lio_tpg_enable_store(struct config_item *item, const char *page,
  73. size_t count)
  74. {
  75. struct se_portal_group *se_tpg = to_tpg(item);
  76. struct efct_lio_tpg *tpg =
  77. container_of(se_tpg, struct efct_lio_tpg, tpg);
  78. struct efct *efct;
  79. struct efc *efc;
  80. unsigned long op;
  81. if (!tpg->nport || !tpg->nport->efct) {
  82. pr_err("%s: Unable to find EFCT device\n", __func__);
  83. return -EINVAL;
  84. }
  85. efct = tpg->nport->efct;
  86. efc = efct->efcport;
  87. if (kstrtoul(page, 0, &op) < 0)
  88. return -EINVAL;
  89. if (op == 1) {
  90. int ret;
  91. tpg->enabled = true;
  92. efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);
  93. ret = efct_xport_control(efct->xport, EFCT_XPORT_PORT_ONLINE);
  94. if (ret) {
  95. efct->tgt_efct.lio_nport = NULL;
  96. efc_log_debug(efct, "cannot bring port online\n");
  97. return ret;
  98. }
  99. } else if (op == 0) {
  100. efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);
  101. if (efc->domain && efc->domain->nport)
  102. efct_scsi_tgt_del_nport(efc, efc->domain->nport);
  103. tpg->enabled = false;
  104. } else {
  105. return -EINVAL;
  106. }
  107. return count;
  108. }
  109. static ssize_t
  110. efct_lio_npiv_tpg_enable_show(struct config_item *item, char *page)
  111. {
  112. struct se_portal_group *se_tpg = to_tpg(item);
  113. struct efct_lio_tpg *tpg =
  114. container_of(se_tpg, struct efct_lio_tpg, tpg);
  115. return snprintf(page, PAGE_SIZE, "%d\n", tpg->enabled);
  116. }
/*
 * configfs "enable" attribute store for an NPIV tpg: writing 1 creates
 * the virtual port (immediately if a fabric domain exists, otherwise as
 * a spec to be instantiated later), writing 0 removes it.
 */
static ssize_t
efct_lio_npiv_tpg_enable_store(struct config_item *item, const char *page,
			       size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);
	struct efct_lio_vport *lio_vport = tpg->vport;
	struct efct *efct;
	struct efc *efc;
	unsigned long op;

	if (kstrtoul(page, 0, &op) < 0)
		return -EINVAL;

	if (!lio_vport) {
		pr_err("Unable to find vport\n");
		return -EINVAL;
	}

	efct = lio_vport->efct;
	efc = efct->efcport;

	if (op == 1) {
		tpg->enabled = true;
		efc_log_debug(efct, "enable portal group %d\n", tpg->tpgt);

		if (efc->domain) {
			/* domain already up: instantiate the vport now */
			int ret;

			ret = efc_nport_vport_new(efc->domain,
						  lio_vport->npiv_wwpn,
						  lio_vport->npiv_wwnn,
						  U32_MAX, false, true,
						  NULL, NULL);
			if (ret != 0) {
				efc_log_err(efct, "Failed to create Vport\n");
				return ret;
			}
			return count;
		}

		/* no domain yet: record a creation spec so the vport is
		 * brought up when the domain attaches
		 */
		if (!(efc_vport_create_spec(efc, lio_vport->npiv_wwnn,
					    lio_vport->npiv_wwpn, U32_MAX,
					    false, true, NULL, NULL)))
			return -ENOMEM;

	} else if (op == 0) {
		efc_log_debug(efct, "disable portal group %d\n", tpg->tpgt);

		tpg->enabled = false;
		/* only physical nport should exist, free lio_nport
		 * allocated in efct_lio_make_nport
		 */
		if (efc->domain) {
			efc_nport_vport_del(efct->efcport, efc->domain,
					    lio_vport->npiv_wwpn,
					    lio_vport->npiv_wwnn);
			return count;
		}
	} else {
		return -EINVAL;
	}
	return count;
}
  173. static char *efct_lio_get_fabric_wwn(struct se_portal_group *se_tpg)
  174. {
  175. struct efct_lio_tpg *tpg =
  176. container_of(se_tpg, struct efct_lio_tpg, tpg);
  177. return tpg->nport->wwpn_str;
  178. }
  179. static char *efct_lio_get_npiv_fabric_wwn(struct se_portal_group *se_tpg)
  180. {
  181. struct efct_lio_tpg *tpg =
  182. container_of(se_tpg, struct efct_lio_tpg, tpg);
  183. return tpg->vport->wwpn_str;
  184. }
  185. static u16 efct_lio_get_tag(struct se_portal_group *se_tpg)
  186. {
  187. struct efct_lio_tpg *tpg =
  188. container_of(se_tpg, struct efct_lio_tpg, tpg);
  189. return tpg->tpgt;
  190. }
  191. static u16 efct_lio_get_npiv_tag(struct se_portal_group *se_tpg)
  192. {
  193. struct efct_lio_tpg *tpg =
  194. container_of(se_tpg, struct efct_lio_tpg, tpg);
  195. return tpg->tpgt;
  196. }
/* TFO callback: demo mode is always permitted for this fabric. */
static int efct_lio_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}
/* TFO callback: demo-mode ACL caching is always permitted. */
static int efct_lio_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 1;
}
  205. static int efct_lio_check_demo_write_protect(struct se_portal_group *se_tpg)
  206. {
  207. struct efct_lio_tpg *tpg =
  208. container_of(se_tpg, struct efct_lio_tpg, tpg);
  209. return tpg->tpg_attrib.demo_mode_write_protect;
  210. }
  211. static int
  212. efct_lio_npiv_check_demo_write_protect(struct se_portal_group *se_tpg)
  213. {
  214. struct efct_lio_tpg *tpg =
  215. container_of(se_tpg, struct efct_lio_tpg, tpg);
  216. return tpg->tpg_attrib.demo_mode_write_protect;
  217. }
  218. static int efct_lio_check_prod_write_protect(struct se_portal_group *se_tpg)
  219. {
  220. struct efct_lio_tpg *tpg =
  221. container_of(se_tpg, struct efct_lio_tpg, tpg);
  222. return tpg->tpg_attrib.prod_mode_write_protect;
  223. }
  224. static int
  225. efct_lio_npiv_check_prod_write_protect(struct se_portal_group *se_tpg)
  226. {
  227. struct efct_lio_tpg *tpg =
  228. container_of(se_tpg, struct efct_lio_tpg, tpg);
  229. return tpg->tpg_attrib.prod_mode_write_protect;
  230. }
/* TFO callback: this fabric always reports instance index 1. */
static u32 efct_lio_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
  235. static int efct_lio_check_stop_free(struct se_cmd *se_cmd)
  236. {
  237. struct efct_scsi_tgt_io *ocp =
  238. container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
  239. struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
  240. efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_CHK_STOP_FREE);
  241. return target_put_sess_cmd(se_cmd);
  242. }
/* Completion callback for efct_scsi_tgt_abort_io(): only logs the
 * outcome; no further cleanup is done here.
 */
static int
efct_lio_abort_tgt_cb(struct efct_io *io,
		      enum efct_scsi_io_status scsi_status,
		      u32 flags, void *arg)
{
	efct_lio_io_printf(io, "Abort done, status:%d\n", scsi_status);
	return 0;
}
  251. static void
  252. efct_lio_aborted_task(struct se_cmd *se_cmd)
  253. {
  254. struct efct_scsi_tgt_io *ocp =
  255. container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
  256. struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
  257. efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_ABORTED_TASK);
  258. if (ocp->rsp_sent)
  259. return;
  260. /* command has been aborted, cleanup here */
  261. ocp->aborting = true;
  262. ocp->err = EFCT_SCSI_STATUS_ABORTED;
  263. /* terminate the exchange */
  264. efct_scsi_tgt_abort_io(io, efct_lio_abort_tgt_cb, NULL);
  265. }
  266. static void efct_lio_release_cmd(struct se_cmd *se_cmd)
  267. {
  268. struct efct_scsi_tgt_io *ocp =
  269. container_of(se_cmd, struct efct_scsi_tgt_io, cmd);
  270. struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
  271. struct efct *efct = io->efct;
  272. efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_RELEASE_CMD);
  273. efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_CMPL_CMD);
  274. efct_scsi_io_complete(io);
  275. atomic_sub_return(1, &efct->tgt_efct.ios_in_use);
  276. }
  277. static void efct_lio_close_session(struct se_session *se_sess)
  278. {
  279. struct efc_node *node = se_sess->fabric_sess_ptr;
  280. pr_debug("se_sess=%p node=%p", se_sess, node);
  281. if (!node) {
  282. pr_debug("node is NULL");
  283. return;
  284. }
  285. efc_node_post_shutdown(node, NULL);
  286. }
/* TFO callback: session index is unused by this fabric; always 0. */
static u32 efct_lio_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
/* TFO callback: no default node attributes to apply for this fabric. */
static void efct_lio_set_default_node_attrs(struct se_node_acl *nacl)
{
}
  294. static int efct_lio_get_cmd_state(struct se_cmd *cmd)
  295. {
  296. struct efct_scsi_tgt_io *ocp =
  297. container_of(cmd, struct efct_scsi_tgt_io, cmd);
  298. struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
  299. return io->tgt_io.state;
  300. }
  301. static int
  302. efct_lio_sg_map(struct efct_io *io)
  303. {
  304. struct efct_scsi_tgt_io *ocp = &io->tgt_io;
  305. struct se_cmd *cmd = &ocp->cmd;
  306. ocp->seg_map_cnt = dma_map_sg(&io->efct->pci->dev, cmd->t_data_sg,
  307. cmd->t_data_nents, cmd->data_direction);
  308. if (ocp->seg_map_cnt == 0)
  309. return -EFAULT;
  310. return 0;
  311. }
  312. static void
  313. efct_lio_sg_unmap(struct efct_io *io)
  314. {
  315. struct efct_scsi_tgt_io *ocp = &io->tgt_io;
  316. struct se_cmd *cmd = &ocp->cmd;
  317. if (WARN_ON(!ocp->seg_map_cnt || !cmd->t_data_sg))
  318. return;
  319. dma_unmap_sg(&io->efct->pci->dev, cmd->t_data_sg,
  320. ocp->seg_map_cnt, cmd->data_direction);
  321. ocp->seg_map_cnt = 0;
  322. }
/* Completion callback for a sent SCSI response: record any error,
 * release the DMA mapping, and hand the command back to the core.
 */
static int
efct_lio_status_done(struct efct_io *io,
		     enum efct_scsi_io_status scsi_status,
		     u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RSP_DONE);

	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}

	/* data phase may still hold a DMA mapping */
	if (ocp->seg_map_cnt)
		efct_lio_sg_unmap(io);

	efct_lio_io_printf(io, "status=%d, err=%d flags=0x%x, dir=%d\n",
			   scsi_status, ocp->err, flags, ocp->ddir);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}
  343. static int
  344. efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
  345. u32 flags, void *arg);
/*
 * TFO write_pending: program the hardware to receive write data from
 * the initiator. On first entry the command's scatterlist is DMA-mapped;
 * later entries continue from the current segment cursor, transferring
 * at most io->sgl_allocated entries per data phase.
 */
static int
efct_lio_write_pending(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg;
	u32 flags = 0, cnt, curcnt;
	u64 length = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_WRITE_PENDING);
	efct_lio_io_printf(io, "trans_state=0x%x se_cmd_flags=0x%x\n",
			   cmd->transport_state, cmd->se_cmd_flags);

	/* first data phase of this command: map the SG list for DMA */
	if (ocp->seg_cnt == 0) {
		ocp->seg_cnt = cmd->t_data_nents;
		ocp->cur_seg = 0;
		if (efct_lio_sg_map(io)) {
			efct_lio_io_printf(io, "efct_lio_sg_map failed\n");
			return -EFAULT;
		}
	}

	/* clamp this phase to the hardware SGL capacity */
	curcnt = (ocp->seg_map_cnt - ocp->cur_seg);
	curcnt = (curcnt < io->sgl_allocated) ? curcnt : io->sgl_allocated;
	/* find current sg */
	for (cnt = 0, sg = cmd->t_data_sg; cnt < ocp->cur_seg; cnt++,
	     sg = sg_next(sg))
		;/* do nothing */

	/* build the hardware SGL for this phase */
	for (cnt = 0; cnt < curcnt; cnt++, sg = sg_next(sg)) {
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		sgl[cnt].len = sg_dma_len(sg);
		length += sgl[cnt].len;
		ocp->cur_seg++;
	}

	/* all segments queued: tell the hardware this is the last phase */
	if (ocp->cur_seg == ocp->seg_cnt)
		flags = EFCT_SCSI_LAST_DATAPHASE;

	return efct_scsi_recv_wr_data(io, flags, sgl, curcnt, length,
				      efct_lio_datamove_done, NULL);
}
/*
 * TFO queue_data_in: send read data to the initiator. A zero-length
 * command short-circuits to sending the response. Otherwise the SG list
 * is mapped on first entry and walked in chunks of at most
 * io->sgl_allocated entries, clamping the final segment so no more than
 * cmd->data_length bytes are transferred.
 */
static int
efct_lio_queue_data_in(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
	struct efct_scsi_sgl *sgl = io->sgl;
	struct scatterlist *sg = NULL;
	uint flags = 0, cnt = 0, curcnt = 0;
	u64 length = 0;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_DATA_IN);

	if (ocp->seg_cnt == 0) {
		if (cmd->data_length) {
			ocp->seg_cnt = cmd->t_data_nents;
			ocp->cur_seg = 0;
			if (efct_lio_sg_map(io)) {
				efct_lio_io_printf(io,
						   "efct_lio_sg_map failed\n");
				return -EAGAIN;
			}
		} else {
			/* If command length is 0, send the response status */
			struct efct_scsi_cmd_resp rsp;

			memset(&rsp, 0, sizeof(rsp));
			efct_lio_io_printf(io,
					   "cmd : %p length 0, send status\n",
					   cmd);
			return efct_scsi_send_resp(io, 0, &rsp,
						   efct_lio_status_done, NULL);
		}
	}
	/* clamp this phase to the hardware SGL capacity */
	curcnt = min(ocp->seg_map_cnt - ocp->cur_seg, io->sgl_allocated);

	while (cnt < curcnt) {
		sg = &cmd->t_data_sg[ocp->cur_seg];
		sgl[cnt].addr = sg_dma_address(sg);
		sgl[cnt].dif_addr = 0;
		/* trim the final segment so the total never exceeds
		 * the command's data length
		 */
		if (ocp->transferred_len + sg_dma_len(sg) >= cmd->data_length)
			sgl[cnt].len = cmd->data_length - ocp->transferred_len;
		else
			sgl[cnt].len = sg_dma_len(sg);

		ocp->transferred_len += sgl[cnt].len;
		length += sgl[cnt].len;
		ocp->cur_seg++;
		cnt++;
		if (ocp->transferred_len == cmd->data_length)
			break;
	}

	if (ocp->transferred_len == cmd->data_length) {
		flags = EFCT_SCSI_LAST_DATAPHASE;
		ocp->seg_cnt = ocp->cur_seg;
	}

	/* If there is residual, disable Auto Good Response */
	if (cmd->residual_count)
		flags |= EFCT_SCSI_NO_AUTO_RESPONSE;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RD_DATA);

	return efct_scsi_send_rd_data(io, flags, sgl, curcnt, length,
				      efct_lio_datamove_done, NULL);
}
/*
 * Send the SCSI response for a completed read. If the hardware already
 * auto-sent the response (EFCT_SCSI_IO_CMPL_RSP_SENT), only free the
 * command; otherwise build and send a response frame (including sense
 * data / residual when applicable).
 *
 * NOTE(review): the scsi_status parameter is unused here — the response
 * status is taken from cmd->scsi_status instead.
 */
static void
efct_lio_send_resp(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		   u32 flags)
{
	struct efct_scsi_cmd_resp rsp;
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *cmd = &io->tgt_io.cmd;
	int rc;

	if (flags & EFCT_SCSI_IO_CMPL_RSP_SENT) {
		ocp->rsp_sent = true;
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
		return;
	}

	/* send check condition if an error occurred */
	memset(&rsp, 0, sizeof(rsp));
	rsp.scsi_status = cmd->scsi_status;
	rsp.sense_data = (uint8_t *)io->tgt_io.sense_buffer;
	rsp.sense_data_length = cmd->scsi_sense_length;

	/* Check for residual underrun or overrun */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
		rsp.residual = -cmd->residual_count;
	else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
		rsp.residual = cmd->residual_count;

	rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
	if (rc != 0) {
		efct_lio_io_printf(io, "Read done, send rsp failed %d\n", rc);
		efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
		transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	} else {
		ocp->rsp_sent = true;
	}
}
/*
 * Completion callback for a data phase (read or write). Continues a
 * multi-phase transfer when segments remain, otherwise unmaps the DMA
 * scatterlist and finishes the command: for writes the command is
 * executed (or failed) in the core; for reads the response is sent.
 */
static int
efct_lio_datamove_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;

	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_DATA_DONE);

	if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
		efct_lio_io_printf(io, "callback completed with error=%d\n",
				   scsi_status);
		ocp->err = scsi_status;
	}
	efct_lio_io_printf(io, "seg_map_cnt=%d\n", ocp->seg_map_cnt);

	if (ocp->seg_map_cnt) {
		/* more segments pending and no error so far: queue the
		 * next data phase instead of completing
		 */
		if (ocp->err == EFCT_SCSI_STATUS_GOOD &&
		    ocp->cur_seg < ocp->seg_cnt) {
			int rc;

			efct_lio_io_printf(io, "continuing cmd at segm=%d\n",
					   ocp->cur_seg);
			if (ocp->ddir == DMA_TO_DEVICE)
				rc = efct_lio_write_pending(&ocp->cmd);
			else
				rc = efct_lio_queue_data_in(&ocp->cmd);
			if (!rc)
				return 0;

			ocp->err = EFCT_SCSI_STATUS_ERROR;
			efct_lio_io_printf(io, "could not continue command\n");
		}
		efct_lio_sg_unmap(io);
	}

	/* aborted I/O is cleaned up via the abort path, not here */
	if (io->tgt_io.aborting) {
		efct_lio_io_printf(io, "IO done aborted\n");
		return 0;
	}

	if (ocp->ddir == DMA_TO_DEVICE) {
		efct_lio_io_printf(io, "Write done, trans_state=0x%x\n",
				   io->tgt_io.cmd.transport_state);
		if (scsi_status != EFCT_SCSI_STATUS_GOOD) {
			transport_generic_request_failure(&io->tgt_io.cmd,
					TCM_CHECK_CONDITION_ABORT_CMD);
			efct_set_lio_io_state(io,
				EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE);
		} else {
			efct_set_lio_io_state(io,
					      EFCT_LIO_STATE_TGT_EXECUTE_CMD);
			target_execute_cmd(&io->tgt_io.cmd);
		}
	} else {
		efct_lio_send_resp(io, scsi_status, flags);
	}
	return 0;
}
/* Completion callback for a TMF response: log and free the se_cmd. */
static int
efct_lio_tmf_done(struct efct_io *io, enum efct_scsi_io_status scsi_status,
		  u32 flags, void *arg)
{
	efct_lio_tmfio_printf(io, "cmd=%p status=%d, flags=0x%x\n",
			      &io->tgt_io.cmd, scsi_status, flags);

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_GENERIC_FREE);
	transport_generic_free_cmd(&io->tgt_io.cmd, 0);
	return 0;
}
/* Completion callback for a TMF that never reached the core: there is
 * no active se_cmd to free, so only release the efct_io.
 */
static int
efct_lio_null_tmf_done(struct efct_io *tmfio,
		       enum efct_scsi_io_status scsi_status,
		       u32 flags, void *arg)
{
	efct_lio_tmfio_printf(tmfio, "cmd=%p status=%d, flags=0x%x\n",
			      &tmfio->tgt_io.cmd, scsi_status, flags);

	/* free struct efct_io only, no active se_cmd */
	efct_scsi_io_complete(tmfio);
	return 0;
}
  549. static int
  550. efct_lio_queue_status(struct se_cmd *cmd)
  551. {
  552. struct efct_scsi_cmd_resp rsp;
  553. struct efct_scsi_tgt_io *ocp =
  554. container_of(cmd, struct efct_scsi_tgt_io, cmd);
  555. struct efct_io *io = container_of(ocp, struct efct_io, tgt_io);
  556. int rc = 0;
  557. efct_set_lio_io_state(io, EFCT_LIO_STATE_TFO_QUEUE_STATUS);
  558. efct_lio_io_printf(io,
  559. "status=0x%x trans_state=0x%x se_cmd_flags=0x%x sns_len=%d\n",
  560. cmd->scsi_status, cmd->transport_state, cmd->se_cmd_flags,
  561. cmd->scsi_sense_length);
  562. memset(&rsp, 0, sizeof(rsp));
  563. rsp.scsi_status = cmd->scsi_status;
  564. rsp.sense_data = (u8 *)io->tgt_io.sense_buffer;
  565. rsp.sense_data_length = cmd->scsi_sense_length;
  566. /* Check for residual underrun or overrun, mark negitive value for
  567. * underrun to recognize in HW
  568. */
  569. if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
  570. rsp.residual = -cmd->residual_count;
  571. else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)
  572. rsp.residual = cmd->residual_count;
  573. rc = efct_scsi_send_resp(io, 0, &rsp, efct_lio_status_done, NULL);
  574. efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_SEND_RSP);
  575. if (rc == 0)
  576. ocp->rsp_sent = true;
  577. return rc;
  578. }
/* TFO queue_tm_rsp: translate the core's TMR response code into the
 * efct response code and send the TMF response frame.
 */
static void efct_lio_queue_tm_rsp(struct se_cmd *cmd)
{
	struct efct_scsi_tgt_io *ocp =
		container_of(cmd, struct efct_scsi_tgt_io, cmd);
	struct efct_io *tmfio = container_of(ocp, struct efct_io, tgt_io);
	struct se_tmr_req *se_tmr = cmd->se_tmr_req;
	u8 rspcode;

	efct_lio_tmfio_printf(tmfio, "cmd=%p function=0x%x tmr->response=%d\n",
			      cmd, se_tmr->function, se_tmr->response);

	switch (se_tmr->response) {
	case TMR_FUNCTION_COMPLETE:
		rspcode = EFCT_SCSI_TMF_FUNCTION_COMPLETE;
		break;
	case TMR_TASK_DOES_NOT_EXIST:
		rspcode = EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND;
		break;
	case TMR_LUN_DOES_NOT_EXIST:
		rspcode = EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER;
		break;
	case TMR_FUNCTION_REJECTED:
	default:
		/* anything unrecognized is reported as rejected */
		rspcode = EFCT_SCSI_TMF_FUNCTION_REJECTED;
		break;
	}

	efct_scsi_send_tmf_resp(tmfio, rspcode, NULL, efct_lio_tmf_done, NULL);
}
  605. static struct efct *efct_find_wwpn(u64 wwpn)
  606. {
  607. struct efct *efct;
  608. /* Search for the HBA that has this WWPN */
  609. list_for_each_entry(efct, &efct_devices, list_entry) {
  610. if (wwpn == efct_get_wwpn(&efct->hw))
  611. return efct;
  612. }
  613. return NULL;
  614. }
  615. static struct se_wwn *
  616. efct_lio_make_nport(struct target_fabric_configfs *tf,
  617. struct config_group *group, const char *name)
  618. {
  619. struct efct_lio_nport *lio_nport;
  620. struct efct *efct;
  621. int ret;
  622. u64 wwpn;
  623. ret = efct_lio_parse_wwn(name, &wwpn, 0);
  624. if (ret)
  625. return ERR_PTR(ret);
  626. efct = efct_find_wwpn(wwpn);
  627. if (!efct) {
  628. pr_err("cannot find EFCT for base wwpn %s\n", name);
  629. return ERR_PTR(-ENXIO);
  630. }
  631. lio_nport = kzalloc(sizeof(*lio_nport), GFP_KERNEL);
  632. if (!lio_nport)
  633. return ERR_PTR(-ENOMEM);
  634. lio_nport->efct = efct;
  635. lio_nport->wwpn = wwpn;
  636. efct_format_wwn(lio_nport->wwpn_str, sizeof(lio_nport->wwpn_str),
  637. "naa.", wwpn);
  638. efct->tgt_efct.lio_nport = lio_nport;
  639. return &lio_nport->nport_wwn;
  640. }
/*
 * configfs callback: create an NPIV nport. @name has the form
 * "<physical wwpn>@<npiv wwpn>:<npiv wwnn>". Allocates the lio_vport,
 * creates the fc_vport on the SCSI host, and links the vport into the
 * HBA's vport list under the lio lock.
 */
static struct se_wwn *
efct_lio_npiv_make_nport(struct target_fabric_configfs *tf,
			 struct config_group *group, const char *name)
{
	struct efct_lio_vport *lio_vport;
	struct efct *efct;
	int ret;
	u64 p_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, *pbuf, tmp[128];
	struct efct_lio_vport_list_t *vport_list;
	struct fc_vport *new_fc_vport;
	struct fc_vport_identifiers vport_id;
	unsigned long flags = 0;

	/* work on a local copy: strsep() modifies the buffer */
	snprintf(tmp, sizeof(tmp), "%s", name);
	pbuf = &tmp[0];

	p = strsep(&pbuf, "@");

	if (!p || !pbuf) {
		pr_err("Unable to find separator operator(@)\n");
		return ERR_PTR(-EINVAL);
	}

	/* p holds the physical WWPN, pbuf the "wwpn:wwnn" NPIV pair */
	ret = efct_lio_parse_wwn(p, &p_wwpn, 0);
	if (ret)
		return ERR_PTR(ret);

	ret = efct_lio_parse_npiv_wwn(pbuf, strlen(pbuf), &npiv_wwpn,
				      &npiv_wwnn);
	if (ret)
		return ERR_PTR(ret);

	efct = efct_find_wwpn(p_wwpn);
	if (!efct) {
		pr_err("cannot find EFCT for base wwpn %s\n", name);
		return ERR_PTR(-ENXIO);
	}

	lio_vport = kzalloc(sizeof(*lio_vport), GFP_KERNEL);
	if (!lio_vport)
		return ERR_PTR(-ENOMEM);

	lio_vport->efct = efct;
	lio_vport->wwpn = p_wwpn;
	lio_vport->npiv_wwpn = npiv_wwpn;
	lio_vport->npiv_wwnn = npiv_wwnn;

	efct_format_wwn(lio_vport->wwpn_str, sizeof(lio_vport->wwpn_str),
			"naa.", npiv_wwpn);

	vport_list = kzalloc(sizeof(*vport_list), GFP_KERNEL);
	if (!vport_list) {
		kfree(lio_vport);
		return ERR_PTR(-ENOMEM);
	}

	vport_list->lio_vport = lio_vport;

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	new_fc_vport = fc_vport_create(efct->shost, 0, &vport_id);
	if (!new_fc_vport) {
		efc_log_err(efct, "fc_vport_create failed\n");
		kfree(lio_vport);
		kfree(vport_list);
		return ERR_PTR(-ENOMEM);
	}

	lio_vport->fc_vport = new_fc_vport;

	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
	INIT_LIST_HEAD(&vport_list->list_entry);
	list_add_tail(&vport_list->list_entry, &efct->tgt_efct.vport_list);
	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);

	return &lio_vport->vport_wwn;
}
/* configfs callback: tear down the physical nport created by
 * efct_lio_make_nport().
 */
static void
efct_lio_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_nport *lio_nport =
		container_of(wwn, struct efct_lio_nport, nport_wwn);
	struct efct *efct = lio_nport->efct;

	/* only physical nport should exist, free lio_nport allocated
	 * in efct_lio_make_nport.
	 */
	kfree(efct->tgt_efct.lio_nport);
	efct->tgt_efct.lio_nport = NULL;
}
/* configfs callback: tear down an NPIV nport — terminate the fc_vport
 * and remove/free its entry from the HBA's vport list under the lock.
 */
static void
efct_lio_npiv_drop_nport(struct se_wwn *wwn)
{
	struct efct_lio_vport *lio_vport =
		container_of(wwn, struct efct_lio_vport, vport_wwn);
	struct efct_lio_vport_list_t *vport, *next_vport;
	struct efct *efct = lio_vport->efct;
	unsigned long flags = 0;

	if (lio_vport->fc_vport)
		fc_vport_terminate(lio_vport->fc_vport);

	spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);

	list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
				 list_entry) {
		if (vport->lio_vport == lio_vport) {
			list_del(&vport->list_entry);
			kfree(vport->lio_vport);
			kfree(vport);
			break;
		}
	}

	spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
}
  742. static struct se_portal_group *
  743. efct_lio_make_tpg(struct se_wwn *wwn, const char *name)
  744. {
  745. struct efct_lio_nport *lio_nport =
  746. container_of(wwn, struct efct_lio_nport, nport_wwn);
  747. struct efct_lio_tpg *tpg;
  748. struct efct *efct;
  749. unsigned long n;
  750. int ret;
  751. if (strstr(name, "tpgt_") != name)
  752. return ERR_PTR(-EINVAL);
  753. if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
  754. return ERR_PTR(-EINVAL);
  755. tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
  756. if (!tpg)
  757. return ERR_PTR(-ENOMEM);
  758. tpg->nport = lio_nport;
  759. tpg->tpgt = n;
  760. tpg->enabled = false;
  761. tpg->tpg_attrib.generate_node_acls = 1;
  762. tpg->tpg_attrib.demo_mode_write_protect = 1;
  763. tpg->tpg_attrib.cache_dynamic_acls = 1;
  764. tpg->tpg_attrib.demo_mode_login_only = 1;
  765. tpg->tpg_attrib.session_deletion_wait = 1;
  766. ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
  767. if (ret < 0) {
  768. kfree(tpg);
  769. return NULL;
  770. }
  771. efct = lio_nport->efct;
  772. efct->tgt_efct.tpg = tpg;
  773. efc_log_debug(efct, "create portal group %d\n", tpg->tpgt);
  774. xa_init(&efct->lookup);
  775. return &tpg->tpg;
  776. }
  777. static void
  778. efct_lio_drop_tpg(struct se_portal_group *se_tpg)
  779. {
  780. struct efct_lio_tpg *tpg =
  781. container_of(se_tpg, struct efct_lio_tpg, tpg);
  782. struct efct *efct = tpg->nport->efct;
  783. efc_log_debug(efct, "drop portal group %d\n", tpg->tpgt);
  784. tpg->nport->efct->tgt_efct.tpg = NULL;
  785. core_tpg_deregister(se_tpg);
  786. xa_destroy(&efct->lookup);
  787. kfree(tpg);
  788. }
  789. static struct se_portal_group *
  790. efct_lio_npiv_make_tpg(struct se_wwn *wwn, const char *name)
  791. {
  792. struct efct_lio_vport *lio_vport =
  793. container_of(wwn, struct efct_lio_vport, vport_wwn);
  794. struct efct_lio_tpg *tpg;
  795. struct efct *efct;
  796. unsigned long n;
  797. int ret;
  798. efct = lio_vport->efct;
  799. if (strstr(name, "tpgt_") != name)
  800. return ERR_PTR(-EINVAL);
  801. if (kstrtoul(name + 5, 10, &n) || n > USHRT_MAX)
  802. return ERR_PTR(-EINVAL);
  803. if (n != 1) {
  804. efc_log_err(efct, "Invalid tpgt index: %ld provided\n", n);
  805. return ERR_PTR(-EINVAL);
  806. }
  807. tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
  808. if (!tpg)
  809. return ERR_PTR(-ENOMEM);
  810. tpg->vport = lio_vport;
  811. tpg->tpgt = n;
  812. tpg->enabled = false;
  813. tpg->tpg_attrib.generate_node_acls = 1;
  814. tpg->tpg_attrib.demo_mode_write_protect = 1;
  815. tpg->tpg_attrib.cache_dynamic_acls = 1;
  816. tpg->tpg_attrib.demo_mode_login_only = 1;
  817. tpg->tpg_attrib.session_deletion_wait = 1;
  818. ret = core_tpg_register(wwn, &tpg->tpg, SCSI_PROTOCOL_FCP);
  819. if (ret < 0) {
  820. kfree(tpg);
  821. return NULL;
  822. }
  823. lio_vport->tpg = tpg;
  824. efc_log_debug(efct, "create vport portal group %d\n", tpg->tpgt);
  825. return &tpg->tpg;
  826. }
/* configfs fabric_drop_tpg callback for an NPIV port: deregister the
 * portal group from TCM and free it.
 */
static void
efct_lio_npiv_drop_tpg(struct se_portal_group *se_tpg)
{
	struct efct_lio_tpg *tpg =
		container_of(se_tpg, struct efct_lio_tpg, tpg);

	efc_log_debug(tpg->vport->efct, "drop npiv portal group %d\n",
		      tpg->tpgt);
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
  837. static int
  838. efct_lio_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
  839. {
  840. struct efct_lio_nacl *nacl;
  841. u64 wwnn;
  842. if (efct_lio_parse_wwn(name, &wwnn, 0) < 0)
  843. return -EINVAL;
  844. nacl = container_of(se_nacl, struct efct_lio_nacl, se_node_acl);
  845. nacl->nport_wwnn = wwnn;
  846. efct_format_wwn(nacl->nport_name, sizeof(nacl->nport_name), "", wwnn);
  847. return 0;
  848. }
  849. static int efct_lio_check_demo_mode_login_only(struct se_portal_group *stpg)
  850. {
  851. struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
  852. return tpg->tpg_attrib.demo_mode_login_only;
  853. }
  854. static int
  855. efct_lio_npiv_check_demo_mode_login_only(struct se_portal_group *stpg)
  856. {
  857. struct efct_lio_tpg *tpg = container_of(stpg, struct efct_lio_tpg, tpg);
  858. return tpg->tpg_attrib.demo_mode_login_only;
  859. }
  860. static struct efct_lio_tpg *
  861. efct_get_vport_tpg(struct efc_node *node)
  862. {
  863. struct efct *efct;
  864. u64 wwpn = node->nport->wwpn;
  865. struct efct_lio_vport_list_t *vport, *next;
  866. struct efct_lio_vport *lio_vport = NULL;
  867. struct efct_lio_tpg *tpg = NULL;
  868. unsigned long flags = 0;
  869. efct = node->efc->base;
  870. spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
  871. list_for_each_entry_safe(vport, next, &efct->tgt_efct.vport_list,
  872. list_entry) {
  873. lio_vport = vport->lio_vport;
  874. if (wwpn && lio_vport && lio_vport->npiv_wwpn == wwpn) {
  875. efc_log_debug(efct, "found tpg on vport\n");
  876. tpg = lio_vport->tpg;
  877. break;
  878. }
  879. }
  880. spin_unlock_irqrestore(&efct->tgt_efct.efct_lio_lock, flags);
  881. return tpg;
  882. }
/*
 * kref release callback: the last reference to the target node is
 * gone.  Tell libefc the initiator delete is complete, then free the
 * node itself.
 */
static void
_efct_tgt_node_free(struct kref *arg)
{
	struct efct_node *tgt_node = container_of(arg, struct efct_node, ref);
	struct efc_node *node = tgt_node->node;

	efc_scsi_del_initiator_complete(node->efc, node);
	kfree(tgt_node);
}
/*
 * target_setup_session() callback: allocate and initialize the driver
 * per-session node (struct efct_node) and cross-link it with the
 * libefc node passed in @private.  Returns 0 on success, -ENOMEM if
 * the node cannot be allocated.
 */
static int efct_session_cb(struct se_portal_group *se_tpg,
			   struct se_session *se_sess, void *private)
{
	struct efc_node *node = private;
	struct efct_node *tgt_node;
	struct efct *efct = node->efc->base;

	tgt_node = kzalloc(sizeof(*tgt_node), GFP_KERNEL);
	if (!tgt_node)
		return -ENOMEM;

	/* Initial reference; released through tgt_node->release. */
	kref_init(&tgt_node->ref);
	tgt_node->release = _efct_tgt_node_free;

	/* Cross-link session, libefc node and driver node. */
	tgt_node->session = se_sess;
	node->tgt_node = tgt_node;
	tgt_node->efct = efct;
	tgt_node->node = node;

	/* Cache FC addressing and hardware indicators from libefc. */
	tgt_node->node_fc_id = node->rnode.fc_id;
	tgt_node->port_fc_id = node->nport->fc_id;
	tgt_node->vpi = node->nport->indicator;
	tgt_node->rpi = node->rnode.indicator;

	spin_lock_init(&tgt_node->active_ios_lock);
	INIT_LIST_HEAD(&tgt_node->active_ios);

	return 0;
}
/*
 * Per-device target initialization: capture SGE/SGL limits from the
 * SLI layer, set up the IO watermark accounting, create the session
 * workqueue and initialize the vport list.  Returns 0 on success or
 * -EIO if the workqueue cannot be created.
 */
int efct_scsi_tgt_new_device(struct efct *efct)
{
	u32 total_ios;

	/* Get the max settings */
	efct->tgt_efct.max_sge = sli_get_max_sge(&efct->hw.sli);
	efct->tgt_efct.max_sgl = sli_get_max_sgl(&efct->hw.sli);

	/* initialize IO watermark fields */
	atomic_set(&efct->tgt_efct.ios_in_use, 0);
	total_ios = efct->hw.config.n_io;
	efc_log_debug(efct, "total_ios=%d\n", total_ios);

	/* Watermarks are fixed percentages of the total IO pool. */
	efct->tgt_efct.watermark_min =
		(total_ios * EFCT_WATERMARK_LOW_PCT) / 100;
	efct->tgt_efct.watermark_max =
		(total_ios * EFCT_WATERMARK_HIGH_PCT) / 100;
	atomic_set(&efct->tgt_efct.io_high_watermark,
		   efct->tgt_efct.watermark_max);
	atomic_set(&efct->tgt_efct.watermark_hit, 0);
	atomic_set(&efct->tgt_efct.initiator_count, 0);

	/* NOTE(review): lio_wq appears to be shared across adapters
	 * yet is (re)created per device here; a second adapter would
	 * overwrite the first workqueue — confirm the single-device
	 * assumption.
	 */
	lio_wq = create_singlethread_workqueue("efct_lio_worker");
	if (!lio_wq) {
		efc_log_err(efct, "workqueue create failed\n");
		return -EIO;
	}

	spin_lock_init(&efct->tgt_efct.efct_lio_lock);
	INIT_LIST_HEAD(&efct->tgt_efct.vport_list);

	return 0;
}
/*
 * Tear-down counterpart of efct_scsi_tgt_new_device(): drain any
 * pending session setup/teardown work before the device goes away.
 */
int efct_scsi_tgt_del_device(struct efct *efct)
{
	/* NOTE(review): the workqueue is only flushed here, never
	 * destroy_workqueue()'d — verify it is released elsewhere.
	 */
	flush_workqueue(lio_wq);

	return 0;
}
/*
 * Called when libefc brings up a new nport.  No per-nport target
 * state is allocated; this only logs the binding to the physical LIO
 * nport (assumes tgt_efct.lio_nport was already created via configfs
 * — TODO confirm it cannot be NULL here).
 */
int
efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport)
{
	struct efct *efct = nport->efc->base;

	efc_log_debug(efct, "New SPORT: %s bound to %s\n", nport->display_name,
		      efct->tgt_efct.lio_nport->wwpn_str);

	return 0;
}
/* Called when libefc tears down an nport; no target state to release,
 * log only.
 */
void
efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport)
{
	efc_log_debug(efc, "Del SPORT: %s\n", nport->display_name);
}
/*
 * Workqueue handler: create the LIO session for a newly discovered
 * initiator node and publish the node in efct->lookup so the IO path
 * can find it by FC id.  Completes the libefc session registration
 * with 0 on success or -EIO on failure, then adjusts the IO high
 * watermark for the new initiator count.
 */
static void efct_lio_setup_session(struct work_struct *work)
{
	struct efct_lio_wq_data *wq_data =
		container_of(work, struct efct_lio_wq_data, work);
	struct efct *efct = wq_data->efct;
	struct efc_node *node = wq_data->ptr;
	char wwpn[WWN_NAME_LEN];
	struct efct_lio_tpg *tpg;
	struct efct_node *tgt_node;
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	int watermark;
	int ini_count;
	u64 id;

	/* Check to see if it's belongs to vport,
	 * if not get physical port
	 */
	tpg = efct_get_vport_tpg(node);
	if (tpg) {
		se_tpg = &tpg->tpg;
	} else if (efct->tgt_efct.tpg) {
		tpg = efct->tgt_efct.tpg;
		se_tpg = &tpg->tpg;
	} else {
		/* No portal group configured yet; cannot register. */
		efc_log_err(efct, "failed to init session\n");
		return;
	}

	/*
	 * Format the FCP Initiator port_name into colon
	 * separated values to match the format by our explicit
	 * ConfigFS NodeACLs.
	 */
	efct_format_wwn(wwpn, sizeof(wwpn), "", efc_node_get_wwpn(node));

	/* efct_session_cb() allocates node->tgt_node on success. */
	se_sess = target_setup_session(se_tpg, 0, 0, TARGET_PROT_NORMAL, wwpn,
				       node, efct_session_cb);
	if (IS_ERR(se_sess)) {
		efc_log_err(efct, "failed to setup session\n");
		kfree(wq_data);
		efc_scsi_sess_reg_complete(node, -EIO);
		return;
	}

	/* Lookup key is (port_fc_id << 32) | node_fc_id. */
	tgt_node = node->tgt_node;
	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
	efc_log_debug(efct, "new initiator sess=%p node=%p id: %llx\n",
		      se_sess, node, id);

	if (xa_err(xa_store(&efct->lookup, id, tgt_node, GFP_KERNEL)))
		efc_log_err(efct, "Node lookup store failed\n");

	efc_scsi_sess_reg_complete(node, 0);

	/* update IO watermark: increment initiator count */
	ini_count = atomic_add_return(1, &efct->tgt_efct.initiator_count);
	watermark = efct->tgt_efct.watermark_max -
		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
	/* Never drop below the configured minimum. */
	watermark = (efct->tgt_efct.watermark_min > watermark) ?
		    efct->tgt_efct.watermark_min : watermark;
	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);

	kfree(wq_data);
}
  1016. int efct_scsi_new_initiator(struct efc *efc, struct efc_node *node)
  1017. {
  1018. struct efct *efct = node->efc->base;
  1019. struct efct_lio_wq_data *wq_data;
  1020. /*
  1021. * Since LIO only supports initiator validation at thread level,
  1022. * we are open minded and accept all callers.
  1023. */
  1024. wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
  1025. if (!wq_data)
  1026. return -ENOMEM;
  1027. wq_data->ptr = node;
  1028. wq_data->efct = efct;
  1029. INIT_WORK(&wq_data->work, efct_lio_setup_session);
  1030. queue_work(lio_wq, &wq_data->work);
  1031. return EFC_SCSI_CALL_ASYNC;
  1032. }
/*
 * Workqueue handler: tear down the LIO session for a departing
 * initiator.  Stops the session, waits for outstanding commands, then
 * removes it and drops the driver-node reference taken at creation
 * (the release callback completes the libefc delete).
 */
static void efct_lio_remove_session(struct work_struct *work)
{
	struct efct_lio_wq_data *wq_data =
		container_of(work, struct efct_lio_wq_data, work);
	struct efct *efct = wq_data->efct;
	struct efc_node *node = wq_data->ptr;
	struct efct_node *tgt_node;
	struct se_session *se_sess;

	tgt_node = node->tgt_node;
	if (!tgt_node) {
		/* base driver has sent back-to-back requests
		 * to unreg session with no intervening
		 * register
		 */
		efc_log_err(efct, "unreg session for NULL session\n");
		efc_scsi_del_initiator_complete(node->efc, node);
		return;
	}

	se_sess = tgt_node->session;
	efc_log_debug(efct, "unreg session se_sess=%p node=%p\n",
		      se_sess, node);

	/* first flag all session commands to complete */
	target_stop_session(se_sess);
	/* now wait for session commands to complete */
	target_wait_for_sess_cmds(se_sess);
	target_remove_session(se_sess);

	/* Unlink before dropping the reference; the release callback
	 * frees tgt_node.
	 */
	tgt_node->session = NULL;
	node->tgt_node = NULL;
	kref_put(&tgt_node->ref, tgt_node->release);

	kfree(wq_data);
}
/*
 * Begin asynchronous teardown of an initiator session.  Removes the
 * node from the IO-path lookup immediately, queues the LIO teardown
 * to the workqueue, and adjusts the IO high watermark.  Returns
 * EFC_SCSI_CALL_ASYNC when teardown was queued, EFC_SCSI_CALL_COMPLETE
 * when nothing needs doing, or a negative errno.
 */
int efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason)
{
	struct efct *efct = node->efc->base;
	struct efct_node *tgt_node = node->tgt_node;
	struct efct_lio_wq_data *wq_data;
	int watermark;
	int ini_count;
	u64 id;

	if (reason == EFCT_SCSI_INITIATOR_MISSING)
		return EFC_SCSI_CALL_COMPLETE;

	if (!tgt_node) {
		efc_log_err(efct, "tgt_node is NULL\n");
		return -EIO;
	}

	/* GFP_ATOMIC: presumably callable from atomic context — TODO
	 * confirm against callers.
	 */
	wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC);
	if (!wq_data)
		return -ENOMEM;

	/* Stop the IO path from finding this node before teardown. */
	id = (u64) tgt_node->port_fc_id << 32 | tgt_node->node_fc_id;
	xa_erase(&efct->lookup, id);

	wq_data->ptr = node;
	wq_data->efct = efct;
	INIT_WORK(&wq_data->work, efct_lio_remove_session);
	queue_work(lio_wq, &wq_data->work);

	/*
	 * update IO watermark: decrement initiator count
	 */
	ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);
	watermark = efct->tgt_efct.watermark_max -
		    ini_count * EFCT_IO_WATERMARK_PER_INITIATOR;
	/* Never drop below the configured minimum. */
	watermark = (efct->tgt_efct.watermark_min > watermark) ?
		    efct->tgt_efct.watermark_min : watermark;
	atomic_set(&efct->tgt_efct.io_high_watermark, watermark);

	return EFC_SCSI_CALL_ASYNC;
}
/*
 * Backend entry point for a newly received SCSI command: translate
 * the efct flags into a TCM task attribute and DMA direction, then
 * hand the command to LIO via target_init_cmd()/target_submit().
 * On failure the IO is freed and the command is dropped.
 */
void efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb,
			u32 cdb_len, u32 flags)
{
	struct efct_scsi_tgt_io *ocp = &io->tgt_io;
	struct se_cmd *se_cmd = &io->tgt_io.cmd;
	struct efct *efct = io->efct;
	char *ddir;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc = 0;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(io, EFCT_LIO_STATE_SCSI_RECV_CMD);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);

	/* set target timeout */
	io->timeout = efct->target_io_timer_sec;

	/* Map command queuing flags onto TCM task attributes. */
	if (flags & EFCT_SCSI_CMD_SIMPLE)
		ocp->task_attr = TCM_SIMPLE_TAG;
	else if (flags & EFCT_SCSI_CMD_HEAD_OF_QUEUE)
		ocp->task_attr = TCM_HEAD_TAG;
	else if (flags & EFCT_SCSI_CMD_ORDERED)
		ocp->task_attr = TCM_ORDERED_TAG;
	else if (flags & EFCT_SCSI_CMD_ACA)
		ocp->task_attr = TCM_ACA_TAG;

	/* Direction flags are named from the initiator's viewpoint:
	 * DIR_IN maps to DMA_TO_DEVICE ("FROM_INITIATOR") and DIR_OUT
	 * to DMA_FROM_DEVICE ("TO_INITIATOR").
	 */
	switch (flags & (EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT)) {
	case EFCT_SCSI_CMD_DIR_IN:
		ddir = "FROM_INITIATOR";
		ocp->ddir = DMA_TO_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_OUT:
		ddir = "TO_INITIATOR";
		ocp->ddir = DMA_FROM_DEVICE;
		break;
	case EFCT_SCSI_CMD_DIR_IN | EFCT_SCSI_CMD_DIR_OUT:
		ddir = "BIDIR";
		ocp->ddir = DMA_BIDIRECTIONAL;
		break;
	default:
		ddir = "NONE";
		ocp->ddir = DMA_NONE;
		break;
	}

	ocp->lun = lun;
	efct_lio_io_printf(io, "new cmd=0x%x ddir=%s dl=%u\n",
			   cdb[0], ddir, io->exp_xfer_len);

	tgt_node = io->node;
	se_sess = tgt_node->session;
	if (!se_sess) {
		efc_log_err(efct, "No session found to submit IO se_cmd: %p\n",
			    &ocp->cmd);
		efct_scsi_io_free(io);
		return;
	}

	efct_set_lio_io_state(io, EFCT_LIO_STATE_TGT_SUBMIT_CMD);
	rc = target_init_cmd(se_cmd, se_sess, &io->tgt_io.sense_buffer[0],
			     ocp->lun, io->exp_xfer_len, ocp->task_attr,
			     ocp->ddir, TARGET_SCF_ACK_KREF);
	if (rc) {
		efc_log_err(efct, "failed to init cmd se_cmd: %p\n", se_cmd);
		efct_scsi_io_free(io);
		return;
	}

	/* A nonzero return from target_submit_prep() is treated as
	 * fully handled here (NOTE(review): confirm prep releases the
	 * command on failure, since no local cleanup is done).
	 */
	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0,
			       NULL, 0, GFP_ATOMIC))
		return;

	target_submit(se_cmd);
}
/*
 * Backend entry point for a task management request: map the efct TMF
 * code to a TCM TMR function and submit it to LIO.  Unsupported TMFs
 * and submission failures are answered with FUNCTION REJECTED.
 * Always returns 0.
 */
int
efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
		   struct efct_io *io_to_abort, u32 flags)
{
	unsigned char tmr_func;
	struct efct *efct = tmfio->efct;
	struct efct_scsi_tgt_io *ocp = &tmfio->tgt_io;
	struct efct_node *tgt_node;
	struct se_session *se_sess;
	int rc;

	memset(ocp, 0, sizeof(struct efct_scsi_tgt_io));
	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_SCSI_RECV_TMF);
	atomic_add_return(1, &efct->tgt_efct.ios_in_use);
	efct_lio_tmfio_printf(tmfio, "%s: new tmf %x lun=%u\n",
			      tmfio->display_name, cmd, lun);

	/* Translate the efct TMF code into the TCM TMR function. */
	switch (cmd) {
	case EFCT_SCSI_TMF_ABORT_TASK:
		tmr_func = TMR_ABORT_TASK;
		break;
	case EFCT_SCSI_TMF_ABORT_TASK_SET:
		tmr_func = TMR_ABORT_TASK_SET;
		break;
	case EFCT_SCSI_TMF_CLEAR_TASK_SET:
		tmr_func = TMR_CLEAR_TASK_SET;
		break;
	case EFCT_SCSI_TMF_LOGICAL_UNIT_RESET:
		tmr_func = TMR_LUN_RESET;
		break;
	case EFCT_SCSI_TMF_CLEAR_ACA:
		tmr_func = TMR_CLEAR_ACA;
		break;
	case EFCT_SCSI_TMF_TARGET_RESET:
		tmr_func = TMR_TARGET_WARM_RESET;
		break;
	case EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT:
	case EFCT_SCSI_TMF_QUERY_TASK_SET:
	default:
		/* Unsupported TMF: reject it. */
		goto tmf_fail;
	}

	tmfio->tgt_io.tmf = tmr_func;
	tmfio->tgt_io.lun = lun;
	tmfio->tgt_io.io_to_abort = io_to_abort;

	tgt_node = tmfio->node;

	se_sess = tgt_node->session;
	if (!se_sess)
		/* NOTE(review): returns without sending any TMF
		 * response when the session is gone — confirm callers
		 * tolerate this silently-dropped case.
		 */
		return 0;

	rc = target_submit_tmr(&ocp->cmd, se_sess, NULL, lun, ocp, tmr_func,
			       GFP_ATOMIC, tmfio->init_task_tag,
			       TARGET_SCF_ACK_KREF);
	efct_set_lio_io_state(tmfio, EFCT_LIO_STATE_TGT_SUBMIT_TMR);
	if (rc)
		goto tmf_fail;

	return 0;

tmf_fail:
	efct_scsi_send_tmf_resp(tmfio, EFCT_SCSI_TMF_FUNCTION_REJECTED,
				NULL, efct_lio_null_tmf_done, NULL);
	return 0;
}
/* Start items for efct_lio_tpg_attrib_cit */

/*
 * DEF_EFCT_TPG_ATTRIB(name) generates a configfs show/store handler
 * pair for one boolean field of tpg_attrib on the physical-port tpg.
 * The store side accepts only 0 or 1.
 */
#define DEF_EFCT_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_tpg_attrib_, name)

/* Instantiate show/store handlers for each supported attribute. */
DEF_EFCT_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_TPG_ATTRIB(session_deletion_wait);

/* Attribute table wired into efct_lio_ops.tfc_tpg_attrib_attrs. */
static struct configfs_attribute *efct_lio_tpg_attrib_attrs[] = {
	&efct_lio_tpg_attrib_attr_generate_node_acls,
	&efct_lio_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
/*
 * NPIV counterpart of DEF_EFCT_TPG_ATTRIB(): generates a configfs
 * show/store handler pair for one boolean tpg_attrib field on an
 * NPIV-port tpg.  The store side accepts only 0 or 1.
 */
#define DEF_EFCT_NPIV_TPG_ATTRIB(name)					  \
									  \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_show(			  \
		struct config_item *item, char *page)			  \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
									  \
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		  \
}									  \
									  \
static ssize_t efct_lio_npiv_tpg_attrib_##name##_store(			  \
		struct config_item *item, const char *page, size_t count) \
{									  \
	struct se_portal_group *se_tpg = to_tpg(item);			  \
	struct efct_lio_tpg *tpg = container_of(se_tpg,			  \
			struct efct_lio_tpg, tpg);			  \
	struct efct_lio_tpg_attrib *a = &tpg->tpg_attrib;		  \
	unsigned long val;						  \
	int ret;							  \
									  \
	ret = kstrtoul(page, 0, &val);					  \
	if (ret < 0) {							  \
		pr_err("kstrtoul() failed with ret: %d\n", ret);	  \
		return ret;						  \
	}								  \
									  \
	if (val != 0 && val != 1) {					  \
		pr_err("Illegal boolean value %lu\n", val);		  \
		return -EINVAL;						  \
	}								  \
									  \
	a->name = val;							  \
									  \
	return count;							  \
}									  \
CONFIGFS_ATTR(efct_lio_npiv_tpg_attrib_, name)

/* Instantiate show/store handlers for each supported attribute. */
DEF_EFCT_NPIV_TPG_ATTRIB(generate_node_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(cache_dynamic_acls);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(prod_mode_write_protect);
DEF_EFCT_NPIV_TPG_ATTRIB(demo_mode_login_only);
DEF_EFCT_NPIV_TPG_ATTRIB(session_deletion_wait);

/* Attribute table wired into efct_lio_npiv_ops.tfc_tpg_attrib_attrs. */
static struct configfs_attribute *efct_lio_npiv_tpg_attrib_attrs[] = {
	&efct_lio_npiv_tpg_attrib_attr_generate_node_acls,
	&efct_lio_npiv_tpg_attrib_attr_cache_dynamic_acls,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_prod_mode_write_protect,
	&efct_lio_npiv_tpg_attrib_attr_demo_mode_login_only,
	&efct_lio_npiv_tpg_attrib_attr_session_deletion_wait,
	NULL,
};
/* Per-tpg "enable" attribute and base attribute tables for both
 * fabrics (show/store handlers are defined elsewhere in this file —
 * not visible in this chunk).
 */
CONFIGFS_ATTR(efct_lio_tpg_, enable);

static struct configfs_attribute *efct_lio_tpg_attrs[] = {
	&efct_lio_tpg_attr_enable, NULL };

CONFIGFS_ATTR(efct_lio_npiv_tpg_, enable);

static struct configfs_attribute *efct_lio_npiv_tpg_attrs[] = {
	&efct_lio_npiv_tpg_attr_enable, NULL };
/* TCM fabric ops for the physical ("efct") port. */
static const struct target_core_fabric_ops efct_lio_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "efct",
	.node_acl_size			= sizeof(struct efct_lio_nacl),
	.max_data_sg_nents		= 65535,
	.tpg_get_wwn			= efct_lio_get_fabric_wwn,
	.tpg_get_tag			= efct_lio_get_tag,
	.fabric_init_nodeacl		= efct_lio_init_nodeacl,
	.tpg_check_demo_mode		= efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache      = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = efct_lio_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect = efct_lio_check_prod_write_protect,
	.tpg_get_inst_index		= efct_lio_tpg_get_inst_index,
	.check_stop_free		= efct_lio_check_stop_free,
	.aborted_task			= efct_lio_aborted_task,
	.release_cmd			= efct_lio_release_cmd,
	.close_session			= efct_lio_close_session,
	.sess_get_index			= efct_lio_sess_get_index,
	.write_pending			= efct_lio_write_pending,
	.set_default_node_attributes	= efct_lio_set_default_node_attrs,
	.get_cmd_state			= efct_lio_get_cmd_state,
	.queue_data_in			= efct_lio_queue_data_in,
	.queue_status			= efct_lio_queue_status,
	.queue_tm_rsp			= efct_lio_queue_tm_rsp,
	.fabric_make_wwn		= efct_lio_make_nport,
	.fabric_drop_wwn		= efct_lio_drop_nport,
	.fabric_make_tpg		= efct_lio_make_tpg,
	.fabric_drop_tpg		= efct_lio_drop_tpg,
	.tpg_check_demo_mode_login_only = efct_lio_check_demo_mode_login_only,
	/* Not implemented for this fabric. */
	.tpg_check_prot_fabric_only	= NULL,
	.sess_get_initiator_sid		= NULL,
	.tfc_tpg_base_attrs		= efct_lio_tpg_attrs,
	.tfc_tpg_attrib_attrs           = efct_lio_tpg_attrib_attrs,
};
/* TCM fabric ops for NPIV ("efct_npiv") ports; shares most handlers
 * with the physical fabric except the wwn/tpg lifecycle callbacks.
 */
static const struct target_core_fabric_ops efct_lio_npiv_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "efct_npiv",
	.node_acl_size			= sizeof(struct efct_lio_nacl),
	.max_data_sg_nents		= 65535,
	.tpg_get_wwn			= efct_lio_get_npiv_fabric_wwn,
	.tpg_get_tag			= efct_lio_get_npiv_tag,
	.fabric_init_nodeacl		= efct_lio_init_nodeacl,
	.tpg_check_demo_mode		= efct_lio_check_demo_mode,
	.tpg_check_demo_mode_cache      = efct_lio_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					efct_lio_npiv_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					efct_lio_npiv_check_prod_write_protect,
	.tpg_get_inst_index		= efct_lio_tpg_get_inst_index,
	.check_stop_free		= efct_lio_check_stop_free,
	.aborted_task			= efct_lio_aborted_task,
	.release_cmd			= efct_lio_release_cmd,
	.close_session			= efct_lio_close_session,
	.sess_get_index			= efct_lio_sess_get_index,
	.write_pending			= efct_lio_write_pending,
	.set_default_node_attributes	= efct_lio_set_default_node_attrs,
	.get_cmd_state			= efct_lio_get_cmd_state,
	.queue_data_in			= efct_lio_queue_data_in,
	.queue_status			= efct_lio_queue_status,
	.queue_tm_rsp			= efct_lio_queue_tm_rsp,
	.fabric_make_wwn		= efct_lio_npiv_make_nport,
	.fabric_drop_wwn		= efct_lio_npiv_drop_nport,
	.fabric_make_tpg		= efct_lio_npiv_make_tpg,
	.fabric_drop_tpg		= efct_lio_npiv_drop_tpg,
	.tpg_check_demo_mode_login_only =
				efct_lio_npiv_check_demo_mode_login_only,
	/* Not implemented for this fabric. */
	.tpg_check_prot_fabric_only	= NULL,
	.sess_get_initiator_sid		= NULL,
	.tfc_tpg_base_attrs		= efct_lio_npiv_tpg_attrs,
	.tfc_tpg_attrib_attrs		= efct_lio_npiv_tpg_attrib_attrs,
};
/*
 * Register both fabric templates (physical "efct" and "efct_npiv")
 * with the TCM core.  Returns 0 on success or the negative errno from
 * target_register_template(), unwinding the first registration if the
 * second one fails.
 */
int efct_scsi_tgt_driver_init(void)
{
	int rc;

	/* Register the top level struct config_item_type with TCM core */
	rc = target_register_template(&efct_lio_ops);
	if (rc < 0) {
		pr_err("target_fabric_configfs_register failed with %d\n", rc);
		return rc;
	}

	rc = target_register_template(&efct_lio_npiv_ops);
	if (rc < 0) {
		pr_err("target_fabric_configfs_register failed with %d\n", rc);
		/* Unwind the first registration. */
		target_unregister_template(&efct_lio_ops);
		return rc;
	}

	return 0;
}
/*
 * Counterpart of efct_scsi_tgt_driver_init(): remove both fabric
 * templates from the TCM core.  Always returns 0.
 */
int efct_scsi_tgt_driver_exit(void)
{
	target_unregister_template(&efct_lio_ops);
	target_unregister_template(&efct_lio_npiv_ops);
	return 0;
}