Merge tag 'for-linus-20181109' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:

 - Two fixes for an ubd regression, one for missing locking, and one
   for a missing initialization of a field. The latter was an old
   latent bug, but it's now visible and triggers (Me, Anton Ivanov)

 - Set of NVMe fixes via Christoph, but applied manually due to a git
   tree mixup (Christoph, Sagi)

 - Fix for a discard split regression, in three patches (Ming)

 - Update libata git trees (Geert)

 - SPDX identifier for sata_rcar (Kuninori Morimoto)

 - Virtual boundary merge fix (Johannes)

 - Preemptively clear memory we are going to pass to userspace, in case
   the driver does a short read (Keith)

* tag 'for-linus-20181109' of git://git.kernel.dk/linux-block:
  block: make sure writesame bio is aligned with logical block size
  block: cleanup __blkdev_issue_discard()
  block: make sure discard bio is aligned with logical block size
  Revert "nvmet-rdma: use a private workqueue for delete"
  nvme: make sure ns head inherits underlying device limits
  nvmet: don't try to add ns to p2p map unless it actually uses it
  sata_rcar: convert to SPDX identifiers
  ubd: fix missing initialization of io_req
  block: Clear kernel memory before copying to user
  MAINTAINERS: Fix remaining pointers to obsolete libata.git
  ubd: fix missing lock around request issue
  block: respect virtual boundary mask in bvecs
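The three discard/write-same patches in this pull all restore the same
invariant: when the block layer splits a large discard into several bios,
every split point must land on a logical-block boundary, or the device can
reject the sub-block remainder. Below is a minimal userspace sketch of that
rounding; the 4096-byte block size, the 1 MiB per-bio cap, and all variable
names are illustrative, not the kernel's own.

    /* Sketch: split a discard range so each chunk stays aligned to the
     * logical block size -- the property the fixes above restore.
     * Sizes and names here are illustrative, not the kernel's. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long lbs = 4096;          /* logical block size */
            unsigned long long cap = 1 << 20;       /* per-bio byte limit */
            unsigned long long start = 0;
            unsigned long long len = 3 * (1 << 20) + 3 * 4096;

            /* round the cap down to a logical-block multiple so no split
             * point falls in the middle of a device block */
            unsigned long long chunk = cap - (cap % lbs);

            while (len) {
                    unsigned long long n = len < chunk ? len : chunk;
                    printf("discard bytes [%llu, %llu)\n", start, start + n);
                    start += n;
                    len -= n;
            }
            return 0;
    }

The kernel applies roughly the equivalent rounding to the per-bio sector
budget before carving each request off the front of the range.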
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->ndev)
 		nvme_nvm_update_nvm_info(ns);
 #ifdef CONFIG_NVME_MULTIPATH
-	if (ns->head->disk)
+	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
+		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+	}
 #endif
 }
 
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value for 512 until disk is validated */
 	blk_queue_logical_block_size(q, 512);
+	blk_set_stacking_limits(&q->limits);
 
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
 	struct pci_dev *p2p_dev;
 	int ret;
 
-	if (!ctrl->p2p_client)
+	if (!ctrl->p2p_client || !ns->use_p2pmem)
 		return;
 
 	if (ns->p2p_dev) {
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int			inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
 
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));