nvmet_fc: add req_release to lldd api
With the advent of the opdone calls changing context, the lldd can no longer assume that once the op->done call returns for RSP operations the request struct is no longer being accessed. As such, revise the lldd API to add a req_release callback that the transport will call when the job is complete. This will also be used in abort cases. Fixed the text in the API header for the change in I/O completion semantics. Revised lpfc to support the new req_release API. Signed-off-by: James Smart <james.smart@broadcom.com> Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
This commit is contained in:

committed by
Christoph Hellwig

parent
39498faef7
commit
19b58d9473
@@ -408,9 +408,7 @@ out:
|
||||
if (phba->ktime_on)
|
||||
lpfc_nvmet_ktime(phba, ctxp);
|
||||
#endif
|
||||
/* Let Abort cmpl repost the context */
|
||||
if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
|
||||
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
|
||||
/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
|
||||
} else {
|
||||
ctxp->entry_cnt++;
|
||||
start_clean = offsetof(struct lpfc_iocbq, wqe);
|
||||
@@ -634,10 +632,47 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
|
||||
complete(&tport->tport_unreg_done);
|
||||
}
|
||||
|
||||
static void
|
||||
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
|
||||
struct nvmefc_tgt_fcp_req *rsp)
|
||||
{
|
||||
struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
|
||||
struct lpfc_nvmet_rcv_ctx *ctxp =
|
||||
container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
|
||||
struct lpfc_hba *phba = ctxp->phba;
|
||||
unsigned long flags;
|
||||
bool aborting = false;
|
||||
|
||||
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
||||
if (ctxp->flag & LPFC_NVMET_ABORT_OP) {
|
||||
aborting = true;
|
||||
ctxp->flag |= LPFC_NVMET_CTX_RLS;
|
||||
}
|
||||
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
||||
|
||||
if (aborting)
|
||||
/* let the abort path do the real release */
|
||||
return;
|
||||
|
||||
/* Sanity check */
|
||||
if (ctxp->state != LPFC_NVMET_STE_DONE) {
|
||||
atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
||||
"6117 Bad state IO x%x aborted\n",
|
||||
ctxp->oxid);
|
||||
}
|
||||
|
||||
lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
|
||||
ctxp->state, 0);
|
||||
|
||||
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
|
||||
}
|
||||
|
||||
static struct nvmet_fc_target_template lpfc_tgttemplate = {
|
||||
.targetport_delete = lpfc_nvmet_targetport_delete,
|
||||
.xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
|
||||
.fcp_op = lpfc_nvmet_xmt_fcp_op,
|
||||
.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
|
||||
|
||||
.max_hw_queues = 1,
|
||||
.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
|
||||
@@ -834,6 +869,7 @@ dropit:
|
||||
ctxp->wqeq = NULL;
|
||||
ctxp->state = LPFC_NVMET_STE_RCV;
|
||||
ctxp->rqb_buffer = (void *)nvmebuf;
|
||||
spin_lock_init(&ctxp->ctxlock);
|
||||
|
||||
lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
|
||||
oxid, size, sid);
|
||||
@@ -1595,6 +1631,8 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
struct lpfc_nvmet_rcv_ctx *ctxp;
|
||||
struct lpfc_nvmet_tgtport *tgtp;
|
||||
uint32_t status, result;
|
||||
unsigned long flags;
|
||||
bool released = false;
|
||||
|
||||
ctxp = cmdwqe->context2;
|
||||
status = bf_get(lpfc_wcqe_c_status, wcqe);
|
||||
@@ -1609,7 +1647,18 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
result, wcqe->word3);
|
||||
|
||||
ctxp->state = LPFC_NVMET_STE_DONE;
|
||||
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
|
||||
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
||||
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
|
||||
released = true;
|
||||
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
||||
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
||||
|
||||
/*
|
||||
* if transport has released ctx, then can reuse it. Otherwise,
|
||||
* will be recycled by transport release call.
|
||||
*/
|
||||
if (released)
|
||||
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
|
||||
|
||||
cmdwqe->context2 = NULL;
|
||||
cmdwqe->context3 = NULL;
|
||||
@@ -1632,7 +1681,9 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
{
|
||||
struct lpfc_nvmet_rcv_ctx *ctxp;
|
||||
struct lpfc_nvmet_tgtport *tgtp;
|
||||
unsigned long flags;
|
||||
uint32_t status, result;
|
||||
bool released = false;
|
||||
|
||||
ctxp = cmdwqe->context2;
|
||||
status = bf_get(lpfc_wcqe_c_status, wcqe);
|
||||
@@ -1654,7 +1705,19 @@ lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
ctxp->state, ctxp->oxid);
|
||||
}
|
||||
ctxp->state = LPFC_NVMET_STE_DONE;
|
||||
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
|
||||
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
||||
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
|
||||
released = true;
|
||||
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
||||
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
||||
|
||||
/*
|
||||
* if transport has released ctx, then can reuse it. Otherwise,
|
||||
* will be recycled by transport release call.
|
||||
*/
|
||||
if (released)
|
||||
lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
|
||||
|
||||
cmdwqe->context2 = NULL;
|
||||
cmdwqe->context3 = NULL;
|
||||
}
|
||||
|
Reference in New Issue
Block a user