Merge tag 'v4.20-rc5' into for-4.21/block
Pull in v4.20-rc5, solving a conflict we'll otherwise get in aio.c and also getting the merge fix that went into mainline that users are hitting testing for-4.21/block and/or for-next.

* tag 'v4.20-rc5': (664 commits)
  Linux 4.20-rc5
  PCI: Fix incorrect value returned from pcie_get_speed_cap()
  MAINTAINERS: Update linux-mips mailing list address
  ocfs2: fix potential use after free
  mm/khugepaged: fix the xas_create_range() error path
  mm/khugepaged: collapse_shmem() do not crash on Compound
  mm/khugepaged: collapse_shmem() without freezing new_page
  mm/khugepaged: minor reorderings in collapse_shmem()
  mm/khugepaged: collapse_shmem() remember to clear holes
  mm/khugepaged: fix crashes due to misaccounted holes
  mm/khugepaged: collapse_shmem() stop if punched or truncated
  mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
  mm/huge_memory: splitting set mapping+index before unfreeze
  mm/huge_memory: rename freeze_page() to unmap_page()
  initramfs: clean old path before creating a hardlink
  kernel/kcov.c: mark funcs in __sanitizer_cov_trace_pc() as notrace
  psi: make disabling/enabling easier for vendor kernels
  proc: fixup map_files test on arm
  debugobjects: avoid recursive calls with kmemleak
  userfaultfd: shmem: UFFDIO_COPY: set the page dirty if VM_WRITE is not set
  ...
drivers/nvme/host/fc.c
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
 
 	bool			ioq_live;
 	bool			assoc_active;
+	atomic_t		err_work_active;
 	u64			association_id;
 
 	struct list_head	ctrl_list;	/* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	tag_set;
 
 	struct delayed_work	connect_work;
+	struct work_struct	err_work;
 
 	struct kref		ref;
 	u32			flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
 	int i;
 
+	/* ensure we've initialized the ops once */
+	if (!(aen_op->flags & FCOP_FLAGS_AEN))
+		return;
+
 	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
 		__nvme_fc_abort_op(ctrl, aen_op);
 }
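
The guard added above ("ensure we've initialized the ops once") makes the AEN abort path a no-op when teardown runs before the AEN ops were ever set up. A minimal sketch of the same pattern, using hypothetical stand-ins (struct my_aen_op, MY_FLAG_AEN, my_abort_one()) for the driver's nvme_fc_fcp_op, FCOP_FLAGS_AEN and __nvme_fc_abort_op():

#include <stddef.h>

/* Hypothetical stand-ins for the driver's types and helpers. */
#define MY_FLAG_AEN	(1 << 0)

struct my_aen_op {
	unsigned long flags;
	/* ... per-command state ... */
};

static void my_abort_one(struct my_aen_op *op)
{
	/* driver-specific abort of one outstanding AEN command */
}

static void my_abort_aen_ops(struct my_aen_op *ops, size_t nr)
{
	size_t i;

	/* ops are initialized as a block: if the first one never got its
	 * AEN flag, none were set up, so there is nothing to abort
	 */
	if (!(ops[0].flags & MY_FLAG_AEN))
		return;

	for (i = 0; i < nr; i++)
		my_abort_one(&ops[i]);
}
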
@@ -1746,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
 	int res;
 
-	nvme_req(rq)->ctrl = &ctrl->ctrl;
 	res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
 	if (res)
 		return res;
 	op->op.fcp_req.first_sgl = &op->sgl[0];
 	op->op.fcp_req.private = &op->priv[0];
+	nvme_req(rq)->ctrl = &ctrl->ctrl;
 	return res;
 }
 
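
The hunk above moves the nvme_req(rq)->ctrl assignment to after the __nvme_fc_init_request() call. One plausible reason for such a move (not stated in the diff itself) is that the init helper clears the request PDU that nvme_req() also points into, so a store made before the clear is lost. A minimal sketch of that hazard, with a hypothetical struct pdu and init_pdu() standing in for the driver's structures:

#include <string.h>

/* Sketch only: why an assignment into a PDU must come after the helper
 * that memsets it. 'struct pdu' and init_pdu() are hypothetical.
 */
struct pdu {
	void *ctrl;
	char  driver_data[64];
};

static void init_pdu(struct pdu *p)
{
	memset(p, 0, sizeof(*p));	/* wipes everything, including ->ctrl */
}

static void setup_wrong(struct pdu *p, void *ctrl)
{
	p->ctrl = ctrl;			/* value is lost ...        */
	init_pdu(p);			/* ... by this memset()     */
}

static void setup_right(struct pdu *p, void *ctrl)
{
	init_pdu(p);
	p->ctrl = ctrl;			/* survives: nothing clears it later */
}
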
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
-	/* only proceed if in LIVE state - e.g. on first error */
+	int active;
+
+	/*
+	 * if an error (io timeout, etc) while (re)connecting,
+	 * it's an error on creating the new association.
+	 * Start the error recovery thread if it hasn't already
+	 * been started. It is expected there could be multiple
+	 * ios hitting this path before things are cleaned up.
+	 */
+	if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+		active = atomic_xchg(&ctrl->err_work_active, 1);
+		if (!active && !schedule_work(&ctrl->err_work)) {
+			atomic_set(&ctrl->err_work_active, 0);
+			WARN_ON(1);
+		}
+		return;
+	}
+
+	/* Otherwise, only proceed if in LIVE state - e.g. on first error */
 	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
 		return;
 
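
The CONNECTING branch added above uses atomic_xchg() on err_work_active as a claim, so that of the many I/Os that can fail while an association is being created only the first one schedules err_work; if schedule_work() then reports the work as already queued, the claim is rolled back. A minimal kernel-style sketch of that schedule-at-most-once pattern, with hypothetical names (my_ctrl, my_err_handler, my_error_seen):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Sketch only: guard a work item so concurrent error paths schedule it once. */
struct my_ctrl {
	atomic_t		err_active;
	struct work_struct	err_work;
};

static void my_err_handler(struct work_struct *work)
{
	struct my_ctrl *ctrl = container_of(work, struct my_ctrl, err_work);

	/* ... recover from the error ... */

	/* re-arm the guard only once recovery is finished */
	atomic_set(&ctrl->err_active, 0);
}

static void my_error_seen(struct my_ctrl *ctrl)
{
	/* the first caller flips 0 -> 1 and owns the scheduling */
	if (atomic_xchg(&ctrl->err_active, 1))
		return;

	if (!schedule_work(&ctrl->err_work)) {
		/* already queued: give the claim back and complain */
		atomic_set(&ctrl->err_active, 0);
		WARN_ON(1);
	}
}

In the driver the same roles are played by err_work_active and err_work, which the later hunks initialize with INIT_WORK() and tear down with cancel_work_sync().
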
@@ -2782,6 +2806,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side. this will block
@@ -2833,6 +2858,21 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 	}
 }
 
+static void
+__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
+{
+	nvme_stop_keep_alive(&ctrl->ctrl);
+
+	/* will block will waiting for io to terminate */
+	nvme_fc_delete_association(ctrl);
+
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
+	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
+		dev_err(ctrl->ctrl.device,
+			"NVME-FC{%d}: error_recovery: Couldn't change state "
+			"to CONNECTING\n", ctrl->cnum);
+}
+
 static void
 nvme_fc_reset_ctrl_work(struct work_struct *work)
 {
@@ -2840,18 +2880,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
 	int ret;
 
+	__nvme_fc_terminate_io(ctrl);
+
 	nvme_stop_ctrl(&ctrl->ctrl);
 
-	/* will block will waiting for io to terminate */
-	nvme_fc_delete_association(ctrl);
-
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-		dev_err(ctrl->ctrl.device,
-			"NVME-FC{%d}: error_recovery: Couldn't change state "
-			"to CONNECTING\n", ctrl->cnum);
-		return;
-	}
-
 	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
 		ret = nvme_fc_create_association(ctrl);
 	else
@@ -2865,6 +2897,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
 			ctrl->cnum);
 }
 
+static void
+nvme_fc_connect_err_work(struct work_struct *work)
+{
+	struct nvme_fc_ctrl *ctrl =
+			container_of(work, struct nvme_fc_ctrl, err_work);
+
+	__nvme_fc_terminate_io(ctrl);
+
+	atomic_set(&ctrl->err_work_active, 0);
+
+	/*
+	 * Rescheduling the connection after recovering
+	 * from the io error is left to the reconnect work
+	 * item, which is what should have stalled waiting on
+	 * the io that had the error that scheduled this work.
+	 */
+}
+
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
 	.name			= "fc",
 	.module			= THIS_MODULE,
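
The new __nvme_fc_terminate_io() factors the teardown sequence (stop the keep-alive, delete the association, move toward CONNECTING unless already there) into one helper that both the reset work above and the new connect-error work call. A rough sketch of that structure, with stub functions standing in for the driver calls shown in the hunks:

/* Sketch only: one shared teardown helper, two work-handler entry points. */
enum my_state { MY_LIVE, MY_CONNECTING, MY_RESETTING };

struct my_ctrl { enum my_state state; };

static void stop_keep_alive(struct my_ctrl *c)      { /* ... */ }
static void delete_association(struct my_ctrl *c)   { /* blocks on io */ }
static void set_state_connecting(struct my_ctrl *c) { c->state = MY_CONNECTING; }

static void my_terminate_io(struct my_ctrl *ctrl)
{
	stop_keep_alive(ctrl);
	delete_association(ctrl);

	/* if the controller is already (re)connecting, leave the state alone */
	if (ctrl->state != MY_CONNECTING)
		set_state_connecting(ctrl);
}

static void my_reset_work(struct my_ctrl *ctrl)
{
	my_terminate_io(ctrl);
	/* ... then re-create the association ... */
}

static void my_connect_err_work(struct my_ctrl *ctrl)
{
	my_terminate_io(ctrl);
	/* reconnect is left to the reconnect work that stalled on the error */
}
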
@@ -2975,6 +3025,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	ctrl->cnum = idx;
 	ctrl->ioq_live = false;
 	ctrl->assoc_active = false;
+	atomic_set(&ctrl->err_work_active, 0);
 	init_waitqueue_head(&ctrl->ioabort_wait);
 
 	get_device(ctrl->dev);
@@ -2982,6 +3033,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+	INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3071,6 +3123,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
+	cancel_work_sync(&ctrl->err_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
 	ctrl->ctrl.opts = NULL;
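
Taken together, the init and teardown hunks follow the usual work-item lifecycle: err_work is wired up once in nvme_fc_init_ctrl() and cancelled synchronously on every exit path (controller delete and the init failure path) before the structures its handler touches go away. A minimal sketch of that discipline, with hypothetical my_ctrl/my_err_handler names:

#include <linux/workqueue.h>

/* Sketch only: initialize a work item once, cancel it synchronously on
 * every teardown path so its handler cannot run against freed state.
 */
struct my_ctrl {
	struct work_struct err_work;
};

static void my_err_handler(struct work_struct *work)
{
	/* error-handling body */
}

static void my_init_ctrl(struct my_ctrl *ctrl)
{
	INIT_WORK(&ctrl->err_work, my_err_handler);
	/* ... remaining setup; failure paths must reach my_delete_ctrl() ... */
}

static void my_delete_ctrl(struct my_ctrl *ctrl)
{
	/* ensure the handler is neither queued nor running before the
	 * memory it dereferences is released
	 */
	cancel_work_sync(&ctrl->err_work);
	/* ... rest of teardown ... */
}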