nvme: move queue_count to the nvme_ctrl
All transports use queue_count in exactly the same way, so move it to the generic struct nvme_ctrl. In the future it will also be maintained by the core.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-By: James Smart <james.smart@broadcom.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
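The shape of the change, for readers skimming the hunks below: each transport (FC, PCI, RDMA, loop) stops keeping a private queue_count and instead reads the one in the struct nvme_ctrl it embeds, so accesses become ctrl->ctrl.queue_count (dev->ctrl.queue_count for PCI). What follows is a minimal userspace sketch of that pattern, not the kernel code itself; the stub types, the stand-in free helper, and the main() harness are illustrative assumptions.

/*
 * Illustrative sketch only (userspace, stubbed types) of the pattern this
 * patch applies: queue_count lives in the embedded struct nvme_ctrl, and
 * transport code dereferences ctrl->ctrl.queue_count instead of a private
 * copy. Build with: cc -o sketch sketch.c
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

struct nvme_ctrl {
        u32 queue_count;                        /* admin queue + I/O queues */
};

struct nvme_rdma_queue {
        int dummy;                              /* stand-in for real queue state */
};

struct nvme_rdma_ctrl {
        struct nvme_rdma_queue *queues;         /* queues[0] is the admin queue */
        struct nvme_ctrl ctrl;                  /* embedded generic controller */
        /* u32 queue_count;  -- removed by the patch, now kept in ctrl above */
};

static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *q)
{
        printf("freeing I/O queue at %p\n", (void *)q);
}

/* Mirrors the post-patch nvme_rdma_free_io_queues(): skip the admin queue. */
static void free_io_queues(struct nvme_rdma_ctrl *ctrl)
{
        u32 i;

        for (i = 1; i < ctrl->ctrl.queue_count; i++)    /* was ctrl->queue_count */
                nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
}

int main(void)
{
        struct nvme_rdma_queue queues[4];
        struct nvme_rdma_ctrl c = { .queues = queues, .ctrl = { .queue_count = 4 } };

        free_io_queues(&c);     /* touches queues 1..3 only */
        return 0;
}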
@@ -148,7 +148,6 @@ struct nvme_fc_ctrl {
         struct device *dev;
         struct nvme_fc_lport *lport;
         struct nvme_fc_rport *rport;
-        u32 queue_count;
         u32 cnum;
 
         u64 association_id;
@@ -1614,7 +1613,7 @@ nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
 {
         int i;
 
-        for (i = 1; i < ctrl->queue_count; i++)
+        for (i = 1; i < ctrl->ctrl.queue_count; i++)
                 nvme_fc_free_queue(&ctrl->queues[i]);
 }
 
@@ -1635,10 +1634,10 @@ __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
 static void
 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
 {
-        struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
+        struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
         int i;
 
-        for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
+        for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
                 __nvme_fc_delete_hw_queue(ctrl, queue, i);
 }
 
@@ -1648,7 +1647,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
         struct nvme_fc_queue *queue = &ctrl->queues[1];
         int i, ret;
 
-        for (i = 1; i < ctrl->queue_count; i++, queue++) {
+        for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
                 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
                 if (ret)
                         goto delete_queues;
@@ -1667,7 +1666,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
         int i, ret = 0;
 
-        for (i = 1; i < ctrl->queue_count; i++) {
+        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
                                         (qsize / 5));
                 if (ret)
@@ -1685,7 +1684,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
 {
         int i;
 
-        for (i = 1; i < ctrl->queue_count; i++)
+        for (i = 1; i < ctrl->ctrl.queue_count; i++)
                 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
 }
 
@@ -2187,7 +2186,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
                 return ret;
         }
 
-        ctrl->queue_count = opts->nr_io_queues + 1;
+        ctrl->ctrl.queue_count = opts->nr_io_queues + 1;
         if (!opts->nr_io_queues)
                 return 0;
 
@@ -2204,7 +2203,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
                                         sizeof(struct scatterlist)) +
                                         ctrl->lport->ops->fcprqst_priv_sz;
         ctrl->tag_set.driver_data = ctrl;
-        ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
         ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
         ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -2258,7 +2257,7 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
         }
 
         /* check for io queues existing */
-        if (ctrl->queue_count == 1)
+        if (ctrl->ctrl.queue_count == 1)
                 return 0;
 
         nvme_fc_init_io_queues(ctrl);
@@ -2381,7 +2380,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
          * Create the io queues
          */
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 if (ctrl->ctrl.state == NVME_CTRL_NEW)
                         ret = nvme_fc_create_io_queues(ctrl);
                 else
@@ -2395,7 +2394,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
         ctrl->ctrl.nr_reconnects = 0;
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_start_queues(&ctrl->ctrl);
                 nvme_queue_scan(&ctrl->ctrl);
                 nvme_queue_async_events(&ctrl->ctrl);
@@ -2447,7 +2446,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
          * io requests back to the block layer as part of normal completions
          * (but with error status).
         */
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                 nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2702,18 +2701,18 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
         spin_lock_init(&ctrl->lock);
 
         /* io queue count */
-        ctrl->queue_count = min_t(unsigned int,
+        ctrl->ctrl.queue_count = min_t(unsigned int,
                                 opts->nr_io_queues,
                                 lport->ops->max_hw_queues);
-        opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
-        ctrl->queue_count++;    /* +1 for admin queue */
+        opts->nr_io_queues = ctrl->ctrl.queue_count; /* so opts has valid value */
+        ctrl->ctrl.queue_count++;       /* +1 for admin queue */
 
         ctrl->ctrl.sqsize = opts->queue_size - 1;
         ctrl->ctrl.kato = opts->kato;
 
         ret = -ENOMEM;
-        ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
-                                GFP_KERNEL);
+        ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
+                                sizeof(struct nvme_fc_queue), GFP_KERNEL);
         if (!ctrl->queues)
                 goto out_free_ida;
 
@@ -142,6 +142,7 @@ struct nvme_ctrl {
         u16 cntlid;
 
         u32 ctrl_config;
+        u32 queue_count;
 
         u32 page_size;
         u32 max_hw_sectors;
@@ -74,7 +74,6 @@ struct nvme_dev {
         struct device *dev;
         struct dma_pool *prp_page_pool;
         struct dma_pool *prp_small_pool;
-        unsigned queue_count;
         unsigned online_queues;
         unsigned max_qid;
         int q_depth;
@@ -1099,9 +1098,9 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
 {
         int i;
 
-        for (i = dev->queue_count - 1; i >= lowest; i--) {
+        for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
                 struct nvme_queue *nvmeq = dev->queues[i];
-                dev->queue_count--;
+                dev->ctrl.queue_count--;
                 dev->queues[i] = NULL;
                 nvme_free_queue(nvmeq);
         }
@@ -1221,7 +1220,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
         nvmeq->qid = qid;
         nvmeq->cq_vector = -1;
         dev->queues[qid] = nvmeq;
-        dev->queue_count++;
+        dev->ctrl.queue_count++;
 
         return nvmeq;
 
@@ -1441,7 +1440,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
         unsigned i, max;
         int ret = 0;
 
-        for (i = dev->queue_count; i <= dev->max_qid; i++) {
+        for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
                 /* vector == qid - 1, match nvme_create_queue */
                 if (!nvme_alloc_queue(dev, i, dev->q_depth,
                      pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) {
@@ -1450,7 +1449,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
                 }
         }
 
-        max = min(dev->max_qid, dev->queue_count - 1);
+        max = min(dev->max_qid, dev->ctrl.queue_count - 1);
         for (i = dev->online_queues; i <= max; i++) {
                 ret = nvme_create_queue(dev->queues[i], i);
                 if (ret)
@@ -2001,7 +2000,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
         nvme_stop_queues(&dev->ctrl);
 
         queues = dev->online_queues - 1;
-        for (i = dev->queue_count - 1; i > 0; i--)
+        for (i = dev->ctrl.queue_count - 1; i > 0; i--)
                 nvme_suspend_queue(dev->queues[i]);
 
         if (dead) {
@@ -2009,7 +2008,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
                  * probe, before the admin queue is configured. Thus,
                  * queue_count can be 0 here.
                  */
-                if (dev->queue_count)
+                if (dev->ctrl.queue_count)
                         nvme_suspend_queue(dev->queues[0]);
         } else {
                 nvme_disable_io_queues(dev, queues);
@@ -103,7 +103,6 @@ struct nvme_rdma_queue {
 struct nvme_rdma_ctrl {
         /* read only in the hot path */
         struct nvme_rdma_queue *queues;
-        u32 queue_count;
 
         /* other member variables */
         struct blk_mq_tag_set tag_set;
@@ -349,7 +348,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
         struct nvme_rdma_ctrl *ctrl = data;
         struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
 
-        BUG_ON(hctx_idx >= ctrl->queue_count);
+        BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
 
         hctx->driver_data = queue;
         return 0;
@@ -587,7 +586,7 @@ static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
         int i;
 
-        for (i = 1; i < ctrl->queue_count; i++)
+        for (i = 1; i < ctrl->ctrl.queue_count; i++)
                 nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
 }
 
@@ -595,7 +594,7 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
         int i, ret = 0;
 
-        for (i = 1; i < ctrl->queue_count; i++) {
+        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret) {
                         dev_info(ctrl->ctrl.device,
@@ -623,14 +622,14 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
         if (ret)
                 return ret;
 
-        ctrl->queue_count = nr_io_queues + 1;
-        if (ctrl->queue_count < 2)
+        ctrl->ctrl.queue_count = nr_io_queues + 1;
+        if (ctrl->ctrl.queue_count < 2)
                 return 0;
 
         dev_info(ctrl->ctrl.device,
                 "creating %d I/O queues.\n", nr_io_queues);
 
-        for (i = 1; i < ctrl->queue_count; i++) {
+        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                 ret = nvme_rdma_init_queue(ctrl, i,
                                 ctrl->ctrl.opts->queue_size);
                 if (ret) {
@@ -705,7 +704,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
         ++ctrl->ctrl.nr_reconnects;
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_rdma_free_io_queues(ctrl);
 
                 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
@@ -735,7 +734,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
         nvme_start_keep_alive(&ctrl->ctrl);
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 ret = nvme_rdma_init_io_queues(ctrl);
                 if (ret)
                         goto requeue;
@@ -749,7 +748,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
         WARN_ON_ONCE(!changed);
         ctrl->ctrl.nr_reconnects = 0;
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_queue_scan(&ctrl->ctrl);
                 nvme_queue_async_events(&ctrl->ctrl);
         }
@@ -772,15 +771,15 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
         nvme_stop_keep_alive(&ctrl->ctrl);
 
-        for (i = 0; i < ctrl->queue_count; i++)
+        for (i = 0; i < ctrl->ctrl.queue_count; i++)
                 clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
 
-        if (ctrl->queue_count > 1)
+        if (ctrl->ctrl.queue_count > 1)
                 nvme_stop_queues(&ctrl->ctrl);
         blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 
         /* We must take care of fastfail/requeue all our inflight requests */
-        if (ctrl->queue_count > 1)
+        if (ctrl->ctrl.queue_count > 1)
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                         nvme_cancel_request, &ctrl->ctrl);
         blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
@@ -1624,7 +1623,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
         cancel_work_sync(&ctrl->err_work);
         cancel_delayed_work_sync(&ctrl->reconnect_work);
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                         nvme_cancel_request, &ctrl->ctrl);
@@ -1716,7 +1715,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
                 goto del_dead_ctrl;
         }
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 ret = blk_mq_reinit_tagset(&ctrl->tag_set);
                 if (ret)
                         goto del_dead_ctrl;
@@ -1733,7 +1732,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
         WARN_ON_ONCE(!changed);
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_start_queues(&ctrl->ctrl);
                 nvme_queue_scan(&ctrl->ctrl);
                 nvme_queue_async_events(&ctrl->ctrl);
@@ -1785,7 +1784,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
         ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
                 SG_CHUNK_SIZE * sizeof(struct scatterlist);
         ctrl->tag_set.driver_data = ctrl;
-        ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
         ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
 
         ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
@@ -1863,12 +1862,12 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
         INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work);
         INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
-        ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+        ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
         ctrl->ctrl.sqsize = opts->queue_size - 1;
         ctrl->ctrl.kato = opts->kato;
 
         ret = -ENOMEM;
-        ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues),
+        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                 GFP_KERNEL);
         if (!ctrl->queues)
                 goto out_uninit_ctrl;
@@ -1925,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
         list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
         mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-        if (opts->nr_io_queues) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_queue_scan(&ctrl->ctrl);
                 nvme_queue_async_events(&ctrl->ctrl);
         }
@@ -44,7 +44,6 @@ struct nvme_loop_iod {
 
 struct nvme_loop_ctrl {
         struct nvme_loop_queue *queues;
-        u32 queue_count;
 
         struct blk_mq_tag_set admin_tag_set;
 
@@ -241,7 +240,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
         struct nvme_loop_ctrl *ctrl = data;
         struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
 
-        BUG_ON(hctx_idx >= ctrl->queue_count);
+        BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
 
         hctx->driver_data = queue;
         return 0;
@@ -307,7 +306,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
         int i;
 
-        for (i = 1; i < ctrl->queue_count; i++)
+        for (i = 1; i < ctrl->ctrl.queue_count; i++)
                 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
 }
 
@@ -330,7 +329,7 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
                 if (ret)
                         goto out_destroy_queues;
 
-                ctrl->queue_count++;
+                ctrl->ctrl.queue_count++;
         }
 
         return 0;
@@ -344,7 +343,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
 {
         int i, ret;
 
-        for (i = 1; i < ctrl->queue_count; i++) {
+        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                 if (ret)
                         return ret;
@@ -372,7 +371,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
         error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
         if (error)
                 return error;
-        ctrl->queue_count = 1;
+        ctrl->ctrl.queue_count = 1;
 
         error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
         if (error)
@@ -426,7 +425,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
         nvme_stop_keep_alive(&ctrl->ctrl);
 
-        if (ctrl->queue_count > 1) {
+        if (ctrl->ctrl.queue_count > 1) {
                 nvme_stop_queues(&ctrl->ctrl);
                 blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                 nvme_cancel_request, &ctrl->ctrl);
@@ -559,7 +558,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
         ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                 SG_CHUNK_SIZE * sizeof(struct scatterlist);
         ctrl->tag_set.driver_data = ctrl;
-        ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
+        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
         ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
         ctrl->ctrl.tagset = &ctrl->tag_set;
 