passthru.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target Passthrough command implementation.
 *
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 * Copyright (c) 2019-2020, Eideticom Inc.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include "../host/nvme.h"
#include "nvmet.h"

MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);

/*
 * xarray to maintain one passthru subsystem per nvme controller.
 */
static DEFINE_XARRAY(passthru_subsystems);

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
{
	/*
	 * Multiple command set support can only be declared if the underlying
	 * controller actually supports it.
	 */
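	/* Bit 43 of CAP is CSS bit 6: more than one I/O command set supported. */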
	if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
		ctrl->cap &= ~(1ULL << 43);
}

static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_SUCCESS;
	int pos, len;
	bool csi_seen = false;
	void *data;
	u8 csi;

	if (!ctrl->subsys->clear_ids)
		return status;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto out_free;
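
	/*
	 * Walk the descriptor list and remember the Command Set Identifier;
	 * it is the only descriptor preserved when clear_ids is set.
	 */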
	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;
		if (cur->nidt == NVME_NIDT_CSI) {
			memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
			csi_seen = true;
			break;
		}
		len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
	}

	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
	if (csi_seen) {
		struct nvme_ns_id_desc *cur = data;

		cur->nidt = NVME_NIDT_CSI;
		cur->nidl = NVME_NIDT_CSI_LEN;
		memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
	}
	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
out_free:
	kfree(data);
	return status;
}

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_ctrl *pctrl = ctrl->subsys->passthru_ctrl;
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ctrl *id;
	unsigned int max_hw_sectors;
	int page_shift;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
	if (status)
		goto out_free;

	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/*
	 * The passthru NVMe driver may have a limit on the number of segments
	 * which depends on the host's memory fragmentation. To solve this,
	 * ensure mdts is limited to a number of pages equal to the number of
	 * segments.
	 */
	max_hw_sectors = min_not_zero(pctrl->max_segments << (PAGE_SHIFT - 9),
				      pctrl->max_hw_sectors);

	/*
	 * nvmet_passthru_map_sg is limited to using a single bio so limit
	 * the mdts based on BIO_MAX_VECS as well
	 */
	max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
				      max_hw_sectors);

	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;

	id->mdts = ilog2(max_hw_sectors) + 9 - page_shift;

	id->acl = 3;
	/*
	 * We export the aerl limit of the fabrics controller; update this when
	 * passthru-based aerl support is added.
	 */
	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* emulate kas as most PCIe ctrls don't support kas */
	id->kas = cpu_to_le16(NVMET_KAS);

	/* don't support host memory buffer */
	id->hmpre = 0;
	id->hmmin = 0;
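
	/*
	 * Advertise at most 64-byte SQEs and 16-byte CQEs (2^6 and 2^4),
	 * which is what the fabrics transports use.
	 */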
	id->sqes = min_t(__u8, ((0x6 << 4) | 0x6), id->sqes);
	id->cqes = min_t(__u8, ((0x4 << 4) | 0x4), id->cqes);

	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	/* don't support fuse commands */
	id->fuses = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	/*
	 * When the passthru controller is set up using the nvme-loop transport
	 * it will export the passthru ctrl's subsysnqn (PCIe NVMe ctrl) and
	 * will fail in nvme/host/core.c in the
	 * nvme_init_subsystem()->nvme_active_ctrl() code path because of the
	 * duplicate ctrl subsysnqn. In order to prevent that we mask the
	 * passthru-ctrl subsysnqn with the target ctrl subsysnqn.
	 */
	memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));

	/* use fabric id-ctrl values */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	/* Support multipath connections with fabrics */
	id->cmic |= 1 << 1;

	/* Disable reservations, see nvmet_parse_passthru_io_cmd() */
	id->oncs &= cpu_to_le16(~NVME_CTRL_ONCS_RESERVATIONS);

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));

out_free:
	kfree(id);
	return status;
}

static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_SUCCESS;
	struct nvme_id_ns *id;
	int i;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
	if (status)
		goto out_free;
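
	/*
	 * Zero out any LBA format that carries metadata; the target cannot
	 * convey metadata (see the comment on id->mc below).
	 */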
	for (i = 0; i < (id->nlbaf + 1); i++)
		if (id->lbaf[i].ms)
			memset(&id->lbaf[i], 0, sizeof(id->lbaf[i]));

	id->flbas = id->flbas & ~(1 << 4);

	/*
	 * Presently the NVMEof target code does not support sending
	 * metadata, so we must disable it here. This should be updated
	 * once target starts supporting metadata.
	 */
	id->mc = 0;

	if (req->sq->ctrl->subsys->clear_ids) {
		memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
		memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
	}

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

out_free:
	kfree(id);
	return status;
}

static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
	struct request *rq = req->p.rq;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	u32 effects;
	int status;

	status = nvme_execute_passthru_rq(rq, &effects);

	if (status == NVME_SC_SUCCESS &&
	    req->cmd->common.opcode == nvme_admin_identify) {
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			nvmet_passthru_override_id_ctrl(req);
			break;
		case NVME_ID_CNS_NS:
			nvmet_passthru_override_id_ns(req);
			break;
		case NVME_ID_CNS_NS_DESC_LIST:
			nvmet_passthru_override_id_descs(req);
			break;
		}
	} else if (status < 0)
		status = NVME_SC_INTERNAL;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, status);
	blk_mq_free_request(rq);
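
	/*
	 * Running in process context here allows nvme_passthru_end() to
	 * handle any declared command effects (queue unfreeze, namespace
	 * rescan, etc.).
	 */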
	if (effects)
		nvme_passthru_end(ctrl, effects, req->cmd, status);
}

static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
						  blk_status_t blk_status)
{
	struct nvmet_req *req = rq->end_io_data;

	req->cqe->result = nvme_req(rq)->result;
	nvmet_req_complete(req, nvme_req(rq)->status);
	blk_mq_free_request(rq);
	return RQ_END_IO_NONE;
}

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
	struct scatterlist *sg;
	struct bio *bio;
	int i;

	if (req->sg_cnt > BIO_MAX_VECS)
		return -EINVAL;
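
	/*
	 * For small transfers, use the bvec array embedded in the request
	 * instead of allocating a separate bio.
	 */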
	if (nvmet_use_inline_bvec(req)) {
		bio = &req->p.inline_bio;
		bio_init(bio, NULL, req->inline_bvec,
			 ARRAY_SIZE(req->inline_bvec), req_op(rq));
	} else {
		bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),
				GFP_KERNEL);
		bio->bi_end_io = bio_put;
	}

	for_each_sg(req->sg, sg, req->sg_cnt, i) {
		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
				    sg->offset) < sg->length) {
			nvmet_req_bio_put(req, bio);
			return -EINVAL;
		}
	}

	blk_rq_bio_prep(rq, bio, req->sg_cnt);

	return 0;
}

static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct request_queue *q = ctrl->admin_q;
	struct nvme_ns *ns = NULL;
	struct request *rq = NULL;
	unsigned int timeout;
	u32 effects;
	u16 status;
	int ret;
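
	/*
	 * I/O commands (qid != 0) are issued on the namespace's request
	 * queue; admin commands go to the passthru controller's admin queue.
	 */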
	if (likely(req->sq->qid != 0)) {
		u32 nsid = le32_to_cpu(req->cmd->common.nsid);

		ns = nvme_find_get_ns(ctrl, nsid);
		if (unlikely(!ns)) {
			pr_err("failed to get passthru ns nsid:%u\n", nsid);
			status = NVME_SC_INVALID_NS | NVME_SC_DNR;
			goto out;
		}

		q = ns->queue;
		timeout = nvmet_req_subsys(req)->io_timeout;
	} else {
		timeout = nvmet_req_subsys(req)->admin_timeout;
	}

	rq = blk_mq_alloc_request(q, nvme_req_op(req->cmd), 0);
	if (IS_ERR(rq)) {
		status = NVME_SC_INTERNAL;
		goto out_put_ns;
	}
	nvme_init_request(rq, req->cmd);

	if (timeout)
		rq->timeout = timeout;

	if (req->sg_cnt) {
		ret = nvmet_passthru_map_sg(req, rq);
		if (unlikely(ret)) {
			status = NVME_SC_INTERNAL;
			goto out_put_req;
		}
	}

	/*
	 * If a command needs post-execution fixups, or there are any
	 * non-trivial effects, make sure to execute the command synchronously
	 * in a workqueue so that nvme_passthru_end gets called.
	 */
	effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
	if (req->p.use_workqueue ||
	    (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
		INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
		req->p.rq = rq;
		queue_work(nvmet_wq, &req->p.work);
	} else {
		rq->end_io = nvmet_passthru_req_done;
		rq->end_io_data = req;
		blk_execute_rq_nowait(rq, false);
	}

	if (ns)
		nvme_put_ns(ns);

	return;

out_put_req:
	blk_mq_free_request(rq);
out_put_ns:
	if (ns)
		nvme_put_ns(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * We need to emulate set host behaviour to ensure that any behaviour
 * requested by the target's host matches the behaviour already in effect
 * on the device's host, and fail otherwise.
 */
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
	struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
	struct nvme_feat_host_behavior *host;
	u16 status = NVME_SC_INTERNAL;
	int ret;
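
	/*
	 * host[0] receives the passthru controller's current behaviour,
	 * host[1] the value the fabrics host asked to set.
	 */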
	host = kzalloc(sizeof(*host) * 2, GFP_KERNEL);
	if (!host)
		goto out_complete_req;

	ret = nvme_get_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
				host, sizeof(*host), NULL);
	if (ret)
		goto out_free_host;

	status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
	if (status)
		goto out_free_host;

	if (memcmp(&host[0], &host[1], sizeof(host[0]))) {
		pr_warn("target host has requested different behaviour from the local host\n");
		status = NVME_SC_INTERNAL;
	}

out_free_host:
	kfree(host);
out_complete_req:
	nvmet_req_complete(req, status);
}

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
	req->p.use_workqueue = false;
	req->execute = nvmet_passthru_execute_cmd;
	return NVME_SC_SUCCESS;
}

u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (ie. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	switch (req->cmd->common.opcode) {
	case nvme_cmd_resv_register:
	case nvme_cmd_resv_report:
	case nvme_cmd_resv_acquire:
	case nvme_cmd_resv_release:
		/*
		 * Reservations cannot be supported properly because the
		 * underlying device has no way of differentiating different
		 * hosts that connect via fabrics. This could potentially be
		 * emulated in the future if regular targets grow support for
		 * this feature.
		 */
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	return nvmet_setup_passthru_command(req);
}

/*
 * Only features that are emulated or specifically allowed in the list are
 * passed down to the controller. This function implements the allow list for
 * both get and set features.
 */
static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
{
	switch (le32_to_cpu(req->cmd->features.fid)) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_LBA_RANGE:
	case NVME_FEAT_TEMP_THRESH:
	case NVME_FEAT_ERR_RECOVERY:
	case NVME_FEAT_VOLATILE_WC:
	case NVME_FEAT_WRITE_ATOMIC:
	case NVME_FEAT_AUTO_PST:
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_HCTM:
	case NVME_FEAT_NOPSC:
	case NVME_FEAT_RRL:
	case NVME_FEAT_PLM_CONFIG:
	case NVME_FEAT_PLM_WINDOW:
	case NVME_FEAT_HOST_BEHAVIOR:
	case NVME_FEAT_SANITIZE:
	case NVME_FEAT_VENDOR_START ... NVME_FEAT_VENDOR_END:
		return nvmet_setup_passthru_command(req);

	case NVME_FEAT_ASYNC_EVENT:
		/* There is no support for forwarding ASYNC events */
	case NVME_FEAT_IRQ_COALESCE:
	case NVME_FEAT_IRQ_CONFIG:
		/* The IRQ settings will not apply to the target controller */
	case NVME_FEAT_HOST_MEM_BUF:
		/*
		 * Any HMB that's set will not be passed through and will
		 * not work as expected
		 */
	case NVME_FEAT_SW_PROGRESS:
		/*
		 * The Pre-Boot Software Load Count doesn't make much
		 * sense for a target to export
		 */
	case NVME_FEAT_RESV_MASK:
	case NVME_FEAT_RESV_PERSIST:
		/* No reservations, see nvmet_parse_passthru_io_cmd() */
	default:
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}

u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	/* Reject any commands with non-sgl flags set (ie. fused commands) */
	if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
		return NVME_SC_INVALID_FIELD;

	/*
	 * Passthru all vendor specific commands
	 */
	if (req->cmd->common.opcode >= nvme_admin_vendor_start)
		return nvmet_setup_passthru_command(req);

	switch (req->cmd->common.opcode) {
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return NVME_SC_SUCCESS;
	case nvme_admin_keep_alive:
		/*
		 * Most PCIe ctrls don't support the keep alive cmd, so we
		 * route keep alive to the non-passthru path. In future,
		 * change this code when PCIe ctrls with keep alive support
		 * become available.
		 */
		req->execute = nvmet_execute_keep_alive;
		return NVME_SC_SUCCESS;
	case nvme_admin_set_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_set_features;
			return NVME_SC_SUCCESS;
		case NVME_FEAT_HOST_BEHAVIOR:
			req->execute = nvmet_passthru_set_host_behaviour;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_get_features:
		switch (le32_to_cpu(req->cmd->features.fid)) {
		case NVME_FEAT_ASYNC_EVENT:
		case NVME_FEAT_KATO:
		case NVME_FEAT_NUM_QUEUES:
		case NVME_FEAT_HOST_ID:
			req->execute = nvmet_execute_get_features;
			return NVME_SC_SUCCESS;
		default:
			return nvmet_passthru_get_set_features(req);
		}
		break;
	case nvme_admin_identify:
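		/*
		 * Identify data is fixed up after execution (see
		 * nvmet_passthru_execute_cmd_work()), so these commands must
		 * take the workqueue path.
		 */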
		switch (req->cmd->identify.cns) {
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_CTRL:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		case NVME_ID_CNS_NS:
			req->execute = nvmet_passthru_execute_cmd;
			req->p.use_workqueue = true;
			return NVME_SC_SUCCESS;
		case NVME_ID_CNS_CS_NS:
			switch (req->cmd->identify.csi) {
			case NVME_CSI_ZNS:
				req->execute = nvmet_passthru_execute_cmd;
				req->p.use_workqueue = true;
				return NVME_SC_SUCCESS;
			}
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		default:
			return nvmet_setup_passthru_command(req);
		}
	case nvme_admin_get_log_page:
		return nvmet_setup_passthru_command(req);
	default:
		/* Reject commands not in the allowlist above */
		return nvmet_report_invalid_opcode(req);
	}
}

int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
{
	struct nvme_ctrl *ctrl;
	struct file *file;
	int ret = -EINVAL;
	void *old;

	mutex_lock(&subsys->lock);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	if (subsys->nr_namespaces) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_unlock;
	}

	ctrl = nvme_ctrl_from_file(file);
	if (!ctrl) {
		pr_err("failed to open nvme controller %s\n",
		       subsys->passthru_ctrl_path);
		goto out_put_file;
	}
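
	/*
	 * Claim the controller for this subsystem; xa_cmpxchg() leaves any
	 * existing owner in place, in which case we bail out below.
	 */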
	old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
			 subsys, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		goto out_put_file;
	}

	if (old)
		goto out_put_file;

	subsys->passthru_ctrl = ctrl;
	subsys->ver = ctrl->vs;

	if (subsys->ver < NVME_VS(1, 2, 1)) {
		pr_warn("nvme controller version is too old: %llu.%llu.%llu, advertising 1.2.1\n",
			NVME_MAJOR(subsys->ver), NVME_MINOR(subsys->ver),
			NVME_TERTIARY(subsys->ver));
		subsys->ver = NVME_VS(1, 2, 1);
	}

	nvme_get_ctrl(ctrl);
	__module_get(subsys->passthru_ctrl->ops->module);
	ret = 0;

out_put_file:
	filp_close(file, NULL);
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}

static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	if (subsys->passthru_ctrl) {
		xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
		module_put(subsys->passthru_ctrl->ops->module);
		nvme_put_ctrl(subsys->passthru_ctrl);
	}
	subsys->passthru_ctrl = NULL;
	subsys->ver = NVMET_DEFAULT_VS;
}

void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);
}

void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
	mutex_lock(&subsys->lock);
	__nvmet_passthru_ctrl_disable(subsys);
	mutex_unlock(&subsys->lock);

	kfree(subsys->passthru_ctrl_path);
}