Merge tag 'for-5.4/block-2019-09-16' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - Two NVMe pull requests:
     - ana log parse fix from Anton
     - nvme quirks support for Apple devices from Ben
     - fix missing bio completion tracing for multipath stack devices
       from Hannes and Mikhail
     - IP TOS settings for nvme rdma and tcp transports from Israel
     - rq_dma_dir cleanups from Israel
     - tracing for Get LBA Status command from Minwoo
     - Some nvme-tcp cleanups from Minwoo, Potnuri and Myself
     - Some consolidation between the fabrics transports for handling
       the CAP register
     - reset race with ns scanning fix for fabrics (move fabrics
       commands to a dedicated request queue with a different lifetime
       from the admin request queue)
     - controller reset and namespace scan races fixes
     - nvme discovery log change uevent support
     - naming improvements from Keith
     - multiple discovery controllers reject fix from James
     - some regular cleanups from various people

 - Series fixing (and re-fixing) null_blk debug printing and nr_devices
   checks (André)

 - A few pull requests from Song, with fixes from Andy, Guoqing,
   Guilherme, Neil, Nigel, and Yufen.

 - REQ_OP_ZONE_RESET_ALL support (Chaitanya)

 - Bio merge handling unification (Christoph)

 - Pick default elevator correctly for devices with special needs
   (Damien)

 - Block stats fixes (Hou)

 - Timeout and support devices nbd fixes (Mike)

 - Series fixing races around elevator switching and device add/remove
   (Ming)

 - sed-opal cleanups (Revanth)

 - Per device weight support for BFQ (Fam)

 - Support for blk-iocost, a new model that can properly account cost
   of IO workloads. (Tejun)

 - blk-cgroup writeback fixes (Tejun)

 - paride queue init fixes (zhengbin)

 - blk_set_runtime_active() cleanup (Stanley)

 - Block segment mapping optimizations (Bart)

 - lightnvm fixes (Hans/Minwoo/YueHaibing)

 - Various little fixes and cleanups

* tag 'for-5.4/block-2019-09-16' of git://git.kernel.dk/linux-block: (186 commits)
  null_blk: format pr_* logs with pr_fmt
  null_blk: match the type of parameter nr_devices
  null_blk: do not fail the module load with zero devices
  block: also check RQF_STATS in blk_mq_need_time_stamp()
  block: make rq sector size accessible for block stats
  bfq: Fix bfq linkage error
  raid5: use bio_end_sector in r5_next_bio
  raid5: remove STRIPE_OPS_REQ_PENDING
  md: add feature flag MD_FEATURE_RAID0_LAYOUT
  md/raid0: avoid RAID0 data corruption due to layout confusion.
  raid5: don't set STRIPE_HANDLE to stripe which is in batch list
  raid5: don't increment read_errors on EILSEQ return
  nvmet: fix a wrong error status returned in error log page
  nvme: send discovery log page change events to userspace
  nvme: add uevent variables for controller devices
  nvme: enable aen regardless of the presence of I/O queues
  nvme-fabrics: allow discovery subsystems accept a kato
  nvmet: Use PTR_ERR_OR_ZERO() in nvmet_init_discovery()
  nvme: Remove redundant assignment of cq vector
  nvme: Assign subsys instance from first ctrl
  ...
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -757,6 +757,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
 	if (remove) {
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
+		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 	}
 	if (ctrl->async_event_sqe.data) {
@@ -798,10 +799,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 			goto out_free_async_qe;
 		}
 
+		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
+			error = PTR_ERR(ctrl->ctrl.fabrics_q);
+			goto out_free_tagset;
+		}
+
 		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
 		if (IS_ERR(ctrl->ctrl.admin_q)) {
 			error = PTR_ERR(ctrl->ctrl.admin_q);
-			goto out_free_tagset;
+			goto out_cleanup_fabrics_q;
 		}
 	}
 
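The point of the new fabrics_q is that admin_q can now stay quiesced across a reset while Connect and property get/set commands still make progress on a queue with its own lifetime; it is created before admin_q so the error unwind (out_cleanup_fabrics_q below) tears things down in reverse order. For illustration, a rough sketch of the matching consumer-side change in drivers/nvme/host/fabrics.c; the argument list is from memory and abbreviated, not verbatim upstream code:

/*
 * Sketch: fabrics property-get runs on ctrl->fabrics_q instead of
 * ctrl->admin_q, so it is not blocked while admin_q is quiesced
 * during reset/teardown. Signature details may differ upstream.
 */
static int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	struct nvme_command cmd = { };
	union nvme_result res;
	int ret;

	cmd.prop_get.opcode = nvme_fabrics_command;
	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
	cmd.prop_get.offset = cpu_to_le32(off);

	/* was: __nvme_submit_sync_cmd(ctrl->admin_q, ...) */
	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
				     NVME_QID_ANY, 0, 0, false);
	if (ret >= 0)
		*val = le64_to_cpu(res.u64);
	return ret;
}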
@@ -809,24 +816,15 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 	if (error)
 		goto out_cleanup_queue;
 
-	error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
-			&ctrl->ctrl.cap);
-	if (error) {
-		dev_err(ctrl->ctrl.device,
-			"prop_get NVME_REG_CAP failed\n");
-		goto out_stop_queue;
-	}
-
-	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
-
-	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+	error = nvme_enable_ctrl(&ctrl->ctrl);
 	if (error)
 		goto out_stop_queue;
 
 	ctrl->ctrl.max_hw_sectors =
 		(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
 
+	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+
 	error = nvme_init_identify(&ctrl->ctrl);
 	if (error)
 		goto out_stop_queue;
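This is the changelog's "consolidation between the fabrics transports for handling the CAP register": the CAP read and the sqsize clamp move into the core, so every transport just calls nvme_enable_ctrl(). A heavily simplified sketch of the consolidated helper in drivers/nvme/host/core.c; the exact body is from memory and abbreviated:

/*
 * Sketch: CAP is read and cached in ctrl->cap here, and sqsize is
 * clamped in one place, instead of being open-coded in each of
 * rdma/tcp/fc/loop. Abbreviated, not verbatim upstream code.
 */
int nvme_enable_ctrl(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
		return ret;
	}

	ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);

	/* ... CC.MPS/CSS setup and CSTS.RDY wait elided ... */
	return ret;
}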
@@ -838,6 +836,9 @@ out_stop_queue:
 out_cleanup_queue:
 	if (new)
 		blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_cleanup_fabrics_q:
+	if (new)
+		blk_cleanup_queue(ctrl->ctrl.fabrics_q);
 out_free_tagset:
 	if (new)
 		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
@@ -907,10 +908,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
 {
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	if (ctrl->ctrl.admin_tagset)
+	if (ctrl->ctrl.admin_tagset) {
 		blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
 			nvme_cancel_request, &ctrl->ctrl);
-	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+		blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
+	}
+	if (remove)
+		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_rdma_destroy_admin_queue(ctrl, remove);
 }
 
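The helper inside the new braces, blk_mq_tagset_wait_completed_request(), was added to the block layer alongside this nvme work: after nvme_cancel_request() has marked outstanding requests completed, it waits until each of those requests has actually executed its completion handler, so teardown cannot free resources under a still-running ->complete(). From memory, the implementation in block/blk-mq-tag.c is roughly this; treat it as a sketch:

/*
 * Sketch (from memory): poll the tagset until no request is still
 * sitting in the COMPLETED state awaiting its ->complete() call.
 */
static bool blk_mq_tagset_count_completed_rqs(struct request *rq,
		void *data, bool reserved)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}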
@@ -920,9 +924,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		nvme_rdma_stop_io_queues(ctrl);
-		if (ctrl->ctrl.tagset)
+		if (ctrl->ctrl.tagset) {
 			blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
 				nvme_cancel_request, &ctrl->ctrl);
+			blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
+		}
 		if (remove)
 			nvme_start_queues(&ctrl->ctrl);
 		nvme_rdma_destroy_io_queues(ctrl, remove);
@@ -1059,6 +1065,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 	nvme_rdma_teardown_io_queues(ctrl, false);
 	nvme_start_queues(&ctrl->ctrl);
 	nvme_rdma_teardown_admin_queue(ctrl, false);
+	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
 		/* state change failure is ok if we're in DELETING state */
@@ -1145,9 +1152,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 		req->mr = NULL;
 	}
 
-	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
-			req->nents, rq_data_dir(rq) ==
-			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
 
 	nvme_cleanup_cmd(rq);
 	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
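These are the "rq_dma_dir cleanups from Israel" in the changelog. The helper lives in include/linux/blk-mq.h and simply folds the open-coded ternary:

/* include/linux/blk-mq.h */
#define rq_dma_dir(rq) \
	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)

Since rq_data_dir() is itself defined in terms of op_is_write(), the replacement is behavior-preserving; it just removes the duplicated direction ternary at every map/unmap call site.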
@@ -1273,7 +1278,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 	req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
 
 	count = ib_dma_map_sg(ibdev, req->sg_table.sgl, req->nents,
-			rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+			      rq_dma_dir(rq));
 	if (unlikely(count <= 0)) {
 		ret = -EIO;
 		goto out_free_table;
@@ -1302,9 +1307,7 @@ out:
 	return 0;
 
 out_unmap_sg:
-	ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
-			req->nents, rq_data_dir(rq) ==
-			WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
 out_free_table:
 	sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
 	return ret;
@@ -1547,16 +1550,18 @@ static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
 
 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
 {
+	struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
 	int ret;
 
 	ret = nvme_rdma_create_queue_ib(queue);
 	if (ret)
 		return ret;
 
+	if (ctrl->opts->tos >= 0)
+		rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
 	ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
 	if (ret) {
-		dev_err(queue->ctrl->ctrl.device,
-			"rdma_resolve_route failed (%d).\n",
+		dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
 			queue->cm_error);
 		goto out_destroy_queue;
 	}
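The tos >= 0 guard works because the fabrics option parser initializes opts->tos to -1, meaning "use the transport default", and only overwrites it when the user supplies a TOS value. A rough sketch of the parsing side in drivers/nvme/host/fabrics.c; bounds and error strings are from memory, so treat this as illustrative:

	/* default: tos unset, let the transport choose (sketch) */
	opts->tos = -1;

	/* inside the nvmf_parse_options() token switch (sketch): */
	case NVMF_OPT_TOS:
		if (match_int(args, &token)) {
			ret = -EINVAL;
			goto out;
		}
		if (token < 0 || token > 255) {
			pr_err("Invalid type of service %d\n", token);
			ret = -EINVAL;
			goto out;
		}
		opts->tos = token;
		break;

On the RDMA side, rdma_set_service_type() records the TOS on the cm_id so that the subsequent rdma_resolve_route() resolves a path whose service level matches that traffic class.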
@@ -1869,10 +1874,11 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 	cancel_delayed_work_sync(&ctrl->reconnect_work);
 
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
+	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	if (shutdown)
 		nvme_shutdown_ctrl(&ctrl->ctrl);
 	else
-		nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+		nvme_disable_ctrl(&ctrl->ctrl);
 	nvme_rdma_teardown_admin_queue(ctrl, shutdown);
 }
 
@@ -2051,7 +2057,8 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
 	.required_opts	= NVMF_OPT_TRADDR,
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
-			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES,
+			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
+			  NVMF_OPT_TOS,
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };