Merge tag 'for-linus-20190125' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A collection of fixes for this release. This contains:

   - Silence sparse rightfully complaining about non-static wbt functions (Bart)

   - Fixes for the zoned comments/ioctl documentation (Damien)

   - direct-io fix that's been lingering for a while (Ernesto)

   - cgroup writeback fix (Tejun)

   - Set of NVMe patches for nvme-rdma/tcp (Sagi, Hannes, Raju)

   - Block recursion tracking fix (Ming)

   - Fix debugfs command flag naming for a few flags (Jianchao)"

* tag 'for-linus-20190125' of git://git.kernel.dk/linux-block:
  block: Fix comment typo
  uapi: fix ioctl documentation
  blk-wbt: Declare local functions static
  blk-mq: fix the cmd_flag_name array
  nvme-multipath: drop optimization for static ANA group IDs
  nvmet-rdma: fix null dereference under heavy load
  nvme-rdma: rework queue maps handling
  nvme-tcp: fix timeout handler
  nvme-rdma: fix timeout handler
  writeback: synchronize sync(2) against cgroup writeback membership switches
  block: cover another queue enter recursion via BIO_QUEUE_ENTERED
  direct-io: allow direct writes to empty inodes
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
 	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
 		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-	if (!(ctrl->anacap & (1 << 6)))
-		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
 
 	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
 		dev_err(ctrl->device,
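The hunk above (apparently from drivers/nvme/host/multipath.c, per the "nvme-multipath: drop optimization for static ANA group IDs" patch in this merge) sizes the ANA log buffer to always hold the per-group namespace ID lists instead of skipping them when ANACAP bit 6 reports static group IDs. A minimal user-space sketch of that size arithmetic, with simplified stand-in structs rather than the real kernel/spec layouts:

/* Stand-alone sketch of the ANA log size math; structs are illustrative
 * stand-ins, not the kernel's definitions. */
#include <stdint.h>
#include <stdio.h>

struct ana_rsp_hdr    { uint64_t chgcnt; uint16_t ngrps; uint16_t rsvd[3]; };
struct ana_group_desc { uint32_t grpid; uint32_t nnsids; uint64_t chgcnt;
			uint8_t state; uint8_t rsvd[15]; };

static size_t ana_log_size(unsigned nanagrpid, unsigned max_namespaces)
{
	/* header plus one descriptor per ANA group ... */
	size_t size = sizeof(struct ana_rsp_hdr) +
		      nanagrpid * sizeof(struct ana_group_desc);

	/* ... plus, after this patch, always room for every namespace ID,
	 * regardless of whether the controller reports static group IDs */
	size += max_namespaces * sizeof(uint32_t);
	return size;
}

int main(void)
{
	printf("ana log size: %zu bytes\n", ana_log_size(4, 128));
	return 0;
}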
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
 
 	struct nvme_ctrl	ctrl;
 	bool			use_inline_data;
+	u32			io_queues[HCTX_MAX_TYPES];
 };
 
 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
 {
 	return nvme_rdma_queue_idx(queue) >
-		queue->ctrl->ctrl.opts->nr_io_queues +
-		queue->ctrl->ctrl.opts->nr_write_queues;
+		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+		queue->ctrl->io_queues[HCTX_TYPE_READ];
 }
 
 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	nr_io_queues = min_t(unsigned int, nr_io_queues,
 				ibdev->num_comp_vectors);
 
-	nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
-	nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
+	if (opts->nr_write_queues) {
+		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_write_queues, nr_io_queues);
+		nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
+	}
+
+	ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
+
+	if (opts->nr_poll_queues) {
+		ctrl->io_queues[HCTX_TYPE_POLL] =
+			min(opts->nr_poll_queues, num_online_cpus());
+		nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
+	}
 
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret)
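The "nvme-rdma: rework queue maps handling" hunks above record the per-type hardware queue counts in io_queues[] at allocation time instead of recomputing them from opts later. A rough user-space sketch that mirrors the split performed in the hunk above, assuming nr_io_queues has already been capped by the controller and the number of completion vectors (names here are illustrative, not the driver's):

#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

/* Split a budget of I/O queues into default (write), read and poll sets,
 * mirroring the logic of the hunk above. */
static unsigned split_io_queues(unsigned nr_io_queues, unsigned nr_write_queues,
				unsigned nr_poll_queues, unsigned online_cpus,
				unsigned io_queues[HCTX_MAX_TYPES])
{
	if (nr_write_queues) {
		/* carve out dedicated default (write) queues */
		io_queues[HCTX_TYPE_DEFAULT] = min_u(nr_write_queues, nr_io_queues);
		nr_io_queues += io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* no split requested: default and read share the same queues */
		io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
	}

	/* read queues take whatever the running total is at this point */
	io_queues[HCTX_TYPE_READ] = nr_io_queues;

	if (nr_poll_queues) {
		io_queues[HCTX_TYPE_POLL] = min_u(nr_poll_queues, online_cpus);
		nr_io_queues += io_queues[HCTX_TYPE_POLL];
	}
	return nr_io_queues;	/* total to request from the controller */
}

int main(void)
{
	unsigned io_queues[HCTX_MAX_TYPES] = { 0 };
	unsigned total = split_io_queues(8, 2, 2, 4, io_queues);

	printf("total=%u default=%u read=%u poll=%u\n", total,
	       io_queues[HCTX_TYPE_DEFAULT], io_queues[HCTX_TYPE_READ],
	       io_queues[HCTX_TYPE_POLL]);
	return 0;
}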
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-	dev_warn(req->queue->ctrl->ctrl.device,
-		 "I/O %d QID %d timeout, reset controller\n",
-		 rq->tag, nvme_rdma_queue_idx(req->queue));
+	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/* queue error recovery */
-	nvme_rdma_error_recovery(req->queue->ctrl);
+	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_rdma_teardown_io_queues(ctrl, false);
+		nvme_rdma_teardown_admin_queue(ctrl, false);
+		return BLK_EH_DONE;
+	}
 
-	/* fail with DNR on cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	nvme_rdma_error_recovery(ctrl);
 
-	return BLK_EH_DONE;
+	return BLK_EH_RESET_TIMER;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
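The "nvme-rdma: fix timeout handler" hunk above changes the timeout policy: if the controller is not live (still connecting, resetting, or already in error recovery), the handler tears the queues down synchronously and returns BLK_EH_DONE, since the teardown completes all outstanding requests; otherwise it only kicks off error recovery and returns BLK_EH_RESET_TIMER so the block layer re-arms the timer and the recovery path completes the request. A compressed, self-contained sketch of that control flow; the types and helpers are stand-ins, not the driver's:

#include <stdio.h>

/* Stand-ins for the block layer's timeout verdicts and controller states */
enum blk_eh { BLK_EH_DONE, BLK_EH_RESET_TIMER };
enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_RESETTING };

struct ctrl { enum ctrl_state state; };

static void teardown_and_complete(struct ctrl *c)
{
	(void)c;
	printf("teardown queues, complete outstanding requests\n");
}

static void start_error_recovery(struct ctrl *c)
{
	(void)c;
	printf("schedule error recovery work\n");
}

/* Mirrors the shape of the reworked handler: tear down in-line only when the
 * controller is not live; otherwise defer to error recovery and re-arm. */
static enum blk_eh timeout_handler(struct ctrl *c)
{
	if (c->state != CTRL_LIVE) {
		teardown_and_complete(c);
		return BLK_EH_DONE;	/* request completed by the teardown */
	}

	start_error_recovery(c);
	return BLK_EH_RESET_TIMER;	/* recovery path will complete it */
}

int main(void)
{
	struct ctrl connecting = { CTRL_CONNECTING }, live = { CTRL_LIVE };

	printf("connecting -> %d\n", timeout_handler(&connecting));
	printf("live       -> %d\n", timeout_handler(&live));
	return 0;
}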
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 
 	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-	set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+	set->map[HCTX_TYPE_DEFAULT].nr_queues =
+		ctrl->io_queues[HCTX_TYPE_DEFAULT];
+	set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
 	if (ctrl->ctrl.opts->nr_write_queues) {
 		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->ctrl.opts->nr_write_queues;
 		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->ctrl.opts->nr_write_queues;
+			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 	} else {
 		/* mixed read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->ctrl.opts->nr_io_queues;
 		set->map[HCTX_TYPE_READ].queue_offset = 0;
 	}
 	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 
 	if (ctrl->ctrl.opts->nr_poll_queues) {
 		set->map[HCTX_TYPE_POLL].nr_queues =
-				ctrl->ctrl.opts->nr_poll_queues;
+				ctrl->io_queues[HCTX_TYPE_POLL];
 		set->map[HCTX_TYPE_POLL].queue_offset =
-				ctrl->ctrl.opts->nr_io_queues;
+				ctrl->io_queues[HCTX_TYPE_DEFAULT];
 		if (ctrl->ctrl.opts->nr_write_queues)
 			set->map[HCTX_TYPE_POLL].queue_offset +=
-				ctrl->ctrl.opts->nr_write_queues;
+				ctrl->io_queues[HCTX_TYPE_READ];
 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
 	}
 	return 0;
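Taken together, the two map_queues hunks lay the queue types out back to back: DEFAULT starts at offset 0, READ starts after the DEFAULT queues when read/write queues are separated (otherwise it shares offset 0), and POLL sits after both. A small sketch of that offset arithmetic with a plain struct standing in for blk_mq_queue_map (illustrative names, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

/* simplified stand-in for struct blk_mq_queue_map */
struct queue_map { unsigned nr_queues; unsigned queue_offset; };

static void layout_maps(const unsigned io_queues[HCTX_MAX_TYPES],
			bool separate_rw, struct queue_map map[HCTX_MAX_TYPES])
{
	map[HCTX_TYPE_DEFAULT].queue_offset = 0;
	map[HCTX_TYPE_DEFAULT].nr_queues = io_queues[HCTX_TYPE_DEFAULT];

	map[HCTX_TYPE_READ].nr_queues = io_queues[HCTX_TYPE_READ];
	/* read queues follow the write queues only when they are separate */
	map[HCTX_TYPE_READ].queue_offset =
		separate_rw ? io_queues[HCTX_TYPE_DEFAULT] : 0;

	map[HCTX_TYPE_POLL].nr_queues = io_queues[HCTX_TYPE_POLL];
	/* poll queues always sit after the default (and any read) queues */
	map[HCTX_TYPE_POLL].queue_offset = io_queues[HCTX_TYPE_DEFAULT] +
		(separate_rw ? io_queues[HCTX_TYPE_READ] : 0);
}

int main(void)
{
	unsigned io_queues[HCTX_MAX_TYPES] = { 2, 4, 2 };
	struct queue_map map[HCTX_MAX_TYPES];

	layout_maps(io_queues, true, map);
	for (int t = 0; t < HCTX_MAX_TYPES; t++)
		printf("type %d: %u queues at offset %u\n",
		       t, map[t].nr_queues, map[t].queue_offset);
	return 0;
}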
@@ -1948,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
-	dev_dbg(ctrl->ctrl.device,
+	dev_warn(ctrl->ctrl.device,
 		"queue %d: timeout request %#x type %d\n",
-		nvme_tcp_queue_id(req->queue), rq->tag,
-		pdu->hdr.type);
+		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
 
 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
-		union nvme_result res = {};
-
-		nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
-		nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
+		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
 		return BLK_EH_DONE;
 	}
 
-	/* queue error recovery */
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
 	nvme_tcp_error_recovery(&ctrl->ctrl);
 
 	return BLK_EH_RESET_TIMER;
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
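The "nvmet-rdma: fix null dereference under heavy load" hunks pair an on-demand fallback allocation with a matching free: when the pre-allocated rsp pool runs dry, a response is allocated on the fly, its resources are set up via the now forward-declared alloc helper, it is flagged as allocated, and it is torn down symmetrically when put back. A generic user-space sketch of that pool-with-fallback pattern; the pool, types and setup/teardown here are illustrative, not nvmet's:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rsp {
	bool allocated;		/* true if not from the pre-allocated pool */
	void *resources;	/* stands in for per-rsp DMA/verbs resources */
	struct rsp *next;
};

struct pool { struct rsp *free_list; };

static int setup_resources(struct rsp *r)
{
	r->resources = malloc(64);
	return r->resources ? 0 : -1;
}

static void free_resources(struct rsp *r)
{
	free(r->resources);
}

static struct rsp *get_rsp(struct pool *p)
{
	struct rsp *r = p->free_list;

	if (r) {			/* fast path: take one from the pool */
		p->free_list = r->next;
		return r;
	}

	/* pool exhausted: fall back to a one-off allocation, as in the fix */
	r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;
	if (setup_resources(r)) {
		free(r);
		return NULL;
	}
	r->allocated = true;
	return r;
}

static void put_rsp(struct pool *p, struct rsp *r)
{
	if (r->allocated) {		/* one-off: release everything */
		free_resources(r);
		free(r);
		return;
	}
	r->next = p->free_list;		/* pooled: just give it back */
	p->free_list = r;
}

int main(void)
{
	struct pool p = { NULL };	/* empty pool forces the fallback path */
	struct rsp *r = get_rsp(&p);

	if (r)
		put_rsp(&p, r);
	return 0;
}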