tcm_loop.c
/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);
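
/*
 * Module parameters for the emulated Scsi_Host queue geometry; they are
 * applied to each host allocated in tcm_loop_driver_probe() below.
 */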
static unsigned int tcm_loop_nr_hw_queues = 1;
module_param_named(nr_hw_queues, tcm_loop_nr_hw_queues, uint, 0644);

static unsigned int tcm_loop_can_queue = 1024;
module_param_named(can_queue, tcm_loop_can_queue, uint, 0644);

static unsigned int tcm_loop_cmd_per_lun = 1024;
module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}
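
/*
 * Final release callback for a command. TMR commands were allocated from
 * tcm_loop_cmd_cache in tcm_loop_issue_tmr() and are freed here; regular
 * commands live in the scsi_cmnd private area and are completed back to
 * the SCSI midlayer via scsi_done().
 */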
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	else
		scsi_done(sc);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static void tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.match = pseudo_lld_bus_match,
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;
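
/*
 * Build a target core se_cmd from the incoming struct scsi_cmnd and hand
 * it to the backend device via target_queue_submission().
 */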
static void tcm_loop_target_queue_cmd(struct tcm_loop_cmd *tl_cmd)
{
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			"TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	target_init_cmd(se_cmd, tl_nexus->se_sess, &tl_cmd->tl_sense_buf[0],
			tl_cmd->sc->device->lun, transfer_length,
			TCM_SIMPLE_TAG, sc->sc_data_direction, 0);

	if (target_submit_prep(se_cmd, sc->cmnd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc),
			GFP_ATOMIC))
		return;

	target_queue_submission(se_cmd);
	return;

out_done:
	scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd = scsi_cmd_priv(sc);

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	memset(tl_cmd, 0, sizeof(*tl_cmd));
	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;

	tcm_loop_target_queue_cmd(tl_cmd);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (e.g. ABORT_TASK or
 * LUN_RESET) to the target core for the given LUN.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);
out:
	return ret;

release:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}
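
/*
 * SCSI EH abort handler: issue an ABORT_TASK TMR for the command's block
 * layer tag and translate the TMR response into SUCCESS/FAILED.
 */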
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
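
/*
 * SCSI EH target reset handler: there is no real transport to reset, so
 * simply flip the TPG transport status back to TCM_TRANSPORT_ONLINE.
 */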
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info = tcm_loop_show_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.this_id = -1,
	.sg_tablesize = 256,
	.max_sectors = 0xFFFF,
	.dma_boundary = PAGE_SIZE - 1,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct tcm_loop_cmd),
};
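
/*
 * LLD bus probe callback: allocate a struct Scsi_Host for the emulated
 * HBA, advertise DIF/DIX protection support, and register it with the
 * SCSI midlayer.
 */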
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			     sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
	sh->nr_hw_queues = tcm_loop_nr_hw_queues;
	sh->can_queue = tcm_loop_can_queue;
	sh->cmd_per_lun = tcm_loop_cmd_per_lun;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static void tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		put_device(&tl_hba->dev);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
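
/*
 * Common data-in/status completion path: copy any sense data and set the
 * SCSI result and residual on the struct scsi_cmnd. The command itself
 * is completed back to the midlayer later via tcm_loop_release_cmd().
 */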
static int tcm_loop_queue_data_or_status(const char *func,
		struct se_cmd *se_cmd, u8 scsi_status)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 func, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
	} else
		sc->result = scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__, se_cmd, SAM_STAT_GOOD);
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	return tcm_loop_queue_data_or_status(__func__,
					     se_cmd, se_cmd->scsi_status);
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */
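
/*
 * Called when a LUN is linked under this TPG in configfs: hot-add the
 * matching struct scsi_device so the emulated initiator sees the new LUN.
 */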
static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */
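
/*
 * The fabric_prot_type TPG attribute configures fabric-level T10-PI
 * emulation; the stored value (0, 1 or 3) is reported back to the target
 * core through tcm_loop_check_prot_fabric_only() above.
 */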
static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}
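
/*
 * Create the single emulated I_T nexus for this TPG by setting up a
 * target core session for the supplied initiator port name.
 */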
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	target_remove_session(se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */
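
/*
 * The "nexus" TPG attribute: reading shows the active initiator port
 * name, writing a prefixed WWN (naa., fc. or iqn.) creates the nexus,
 * and writing "NULL" tears it down.
 */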
static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;

	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
					struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */
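
/*
 * Parse the "tpgt_N" configfs directory name and register TPG N of this
 * emulated HBA as a TCM target endpoint.
 */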
static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn,
						     const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
					struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */
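
/*
 * Top-level configfs WWN creation: the directory name prefix (naa., fc.
 * or iqn.) selects the emulated protocol, after which a virtual HBA and
 * its struct Scsi_Host are registered on the tcm_loop bus.
 */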
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		return ERR_PTR(ret);

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev;
	 * tcm_loop_release_adapter() will release *tl_hba.
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */
static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module = THIS_MODULE,
	.fabric_name = "loopback",
	.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
	.tpg_get_tag = tcm_loop_get_tag,
	.tpg_check_demo_mode = tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index = tcm_loop_get_inst_index,
	.check_stop_free = tcm_loop_check_stop_free,
	.release_cmd = tcm_loop_release_cmd,
	.sess_get_index = tcm_loop_sess_get_index,
	.write_pending = tcm_loop_write_pending,
	.set_default_node_attributes = tcm_loop_set_default_node_attributes,
	.get_cmd_state = tcm_loop_get_cmd_state,
	.queue_data_in = tcm_loop_queue_data_in,
	.queue_status = tcm_loop_queue_status,
	.queue_tm_rsp = tcm_loop_queue_tm_rsp,
	.aborted_task = tcm_loop_aborted_task,
	.fabric_make_wwn = tcm_loop_make_scsi_hba,
	.fabric_drop_wwn = tcm_loop_drop_scsi_hba,
	.fabric_make_tpg = tcm_loop_make_naa_tpg,
	.fabric_drop_tpg = tcm_loop_drop_naa_tpg,
	.fabric_post_link = tcm_loop_port_link,
	.fabric_pre_unlink = tcm_loop_port_unlink,
	.tfc_wwn_attrs = tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
};
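
/*
 * Module init: create the slab cache used for TMR commands, register the
 * pseudo bus/driver pair, then register the fabric template with the
 * target core. Each step is unwound if a later one fails.
 */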
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
					       sizeof(struct tcm_loop_cmd),
					       __alignof__(struct tcm_loop_cmd),
					       0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <[email protected]>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);