qla_nvme.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

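/*
 * Register an fcport with the FC-NVMe transport as a remote port once it
 * reports NVMe target or discovery support in its PRLI service parameters.
 */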
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = fcport->dev_loss_tmo;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
				       fcport->dev_loss_tmo);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		       "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		       "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/* Allocate a queue for NVMe traffic */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			    "Returning existing qpair of %p for idx=%x\n",
			    *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			    "Failed to allocate qpair\n");
			return -EINVAL;
		}
		qla_adjust_iocb_limit(vha);
	}
	*handle = qpair;

	return 0;
}

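/*
 * Final kref release for an NVMe FCP command: fill in the transport status,
 * complete the request back to the FC-NVMe transport, then free the srb.
 */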
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

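/*
 * Final kref release for an NVMe LS request: complete it back to the
 * FC-NVMe transport and free the srb.
 */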
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);

	return;
}

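/* Worker that issues the firmware abort for an outstanding NVMe command. */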
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval, abts_done_called = 1;
	bool io_wait_for_abort_done;
	uint32_t handle;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p desc=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc,
	       fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	/*
	 * sp may not be valid after abort_command if the return code is
	 * either SUCCESS or ERR_FROM_FW, so cache the values here.
	 */
	io_wait_for_abort_done = ql2xabts_wait_nvme &&
					QLA_ABTS_WAIT_ENABLED(sp);
	handle = sp->handle;

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	       "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	       __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	       sp, handle, fcport, rval);

	/*
	 * If async tmf is enabled, the abort callback is called only on
	 * return codes QLA_SUCCESS and QLA_ERR_FROM_FW.
	 */
	if (ql2xasynctmfenable &&
	    rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW)
		abts_done_called = 0;

	/*
	 * Return before decreasing the kref so that I/O requests wait until
	 * the ABTS completes. That kref is dropped in qla24xx_abort_sp_done().
	 */
	if (abts_done_called && io_wait_for_abort_done)
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

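/* FC-NVMe transport entry point: abort an outstanding LS request. */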
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

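/* FC-NVMe transport entry point: issue an NVMe Link Service request. */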
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = fd->rqstdma;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

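/* FC-NVMe transport entry point: abort an outstanding FCP command. */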
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

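/*
 * Build a Command Type NVME IOCB (plus any continuation IOCBs for the
 * scatter/gather list) on the qpair's request queue and ring the doorbell.
 */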
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	rsp = qpair->rsp;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
	sp->iores.exch_cnt = 1;
	sp->iores.iocb_cnt = req_cnt;
	if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
		rval = -EBUSY;
		goto queuing_error;
	}

	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
				rval = -EBUSY;
				goto queuing_error;
			}
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer how do we check buffer len == 0?? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
				    cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* ignore nvme async cmd due to long timeout */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

queuing_error:
	if (rval)
		qla_put_fw_resources(sp->qpair, &sp->iores);

	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

/* Post a command */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;
	ha = vha->hw;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending I/Os, return busy back to stall the IO queue. This happens
	 * when the link goes away and fw hasn't notified us yet, but I/Os are
	 * being returned. If the dev comes back quickly we won't exhaust the
	 * IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	qpair = qla_mapq_nvme_select_qpair(ha, qpair);

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

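/* Map blk-mq hardware queues to the PCI function's interrupt vectors. */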
static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
    struct blk_mq_queue_map *map)
{
	struct scsi_qla_host *vha = lport->private;

	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;

	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue = qla_nvme_alloc_queue,
	.delete_queue = NULL,
	.ls_req = qla_nvme_ls_req,
	.ls_abort = qla_nvme_ls_abort,
	.fcp_io = qla_nvme_post_cmd,
	.fcp_abort = qla_nvme_fcp_abort,
	.map_queues = qla_nvme_map_queues,
	.max_hw_queues = DEF_NVME_HW_QUEUES,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

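/*
 * Unregister the fcport's remote port from the FC-NVMe transport and wait
 * for the transport's delete callback to complete.
 */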
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

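/*
 * Unregister the NVMe local port, if one was registered, and wait for the
 * transport to finish the deletion.
 */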
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

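/* Register this HBA port with the FC-NVMe transport as an initiator local port. */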
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is lower than minimum queues: %d. Resetting ql2xnvme_queues to:%d\n",
		    ql2xnvme_queues, MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
		ql_log(ql_log_warn, vha, 0xfffd,
		    "ql2xnvme_queues=%d is greater than available IRQs: %d. Resetting ql2xnvme_queues to: %d\n",
		    ql2xnvme_queues, (ha->max_qpairs - 1),
		    (ha->max_qpairs - 1));
		ql2xnvme_queues = ha->max_qpairs - 1;
	}

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(ql2xnvme_queues),
		(uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));

	ql_log(ql_log_info, vha, 0xfffb,
	       "Number of NVME queues used for this port: %d\n",
	       qla_nvme_fc_transport.max_hw_queues);

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	mutex_lock(&ha->vport_lock);
	/*
	 * Check again for nvme_local_port to see if any other thread raced
	 * with this one and finished registration.
	 */
	if (!vha->nvme_local_port) {
		ql_log(ql_log_info, vha, 0xffff,
		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

		ret = nvme_fc_register_localport(&pinfo, tmpl,
						 get_device(&ha->pdev->dev),
						 &vha->nvme_local_port);
		mutex_unlock(&ha->vport_lock);
	} else {
		mutex_unlock(&ha->vport_lock);
		return 0;
	}
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

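/*
 * Request a driver-specified ABTS retry count and response timeout in the
 * Abort IOCB; only applies when waiting for ABTS completion is enabled.
 */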
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));

	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* set it to 2 * r_a_tov in secs */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

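/* Log the completion status returned by firmware for an Abort IOCB. */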
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	u16 comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	/* N_Port handle is not currently logged in */
	case CS_TIMEOUT:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_UNAVAILABLE:
	/* Firmware found that the port name changed */
	case CS_PORT_LOGGED_OUT:
	/* BA_RJT was received for the ABTS */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async, vha, 0xf09e,
		       "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
		       "IOCB request is completed successfully comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async, vha, 0xf0a0,
		       "IOCB request is failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}

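/*
 * Drop the command kref that was held so the command could outlive its ABTS;
 * this is a no-op unless waiting for ABTS completion is enabled.
 */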
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}