Merge branch 'for-4.5/nvme' of git://git.kernel.dk/linux-block
Pull NVMe updates from Jens Axboe:
 "Last branch for this series is the nvme changes. It's in a separate
  branch to avoid splitting too much between core and NVMe changes,
  since NVMe is still helping drive some blk-mq changes. That said,
  not a huge amount of core changes in here. The grunt of the work is
  the continued split of the code"

* 'for-4.5/nvme' of git://git.kernel.dk/linux-block: (67 commits)
  uapi: update install list after nvme.h rename
  NVMe: Export NVMe attributes to sysfs group
  NVMe: Shutdown controller only for power-off
  NVMe: IO queue deletion re-write
  NVMe: Remove queue freezing on resets
  NVMe: Use a retryable error code on reset
  NVMe: Fix admin queue ring wrap
  nvme: make SG_IO support optional
  nvme: fixes for NVME_IOCTL_IO_CMD on the char device
  nvme: synchronize access to ctrl->namespaces
  nvme: Move nvme_freeze/unfreeze_queues to nvme core
  PCI/AER: include header file
  NVMe: Export namespace attributes to sysfs
  NVMe: Add pci error handlers
  block: remove REQ_NO_TIMEOUT flag
  nvme: merge iod and cmd_info
  nvme: meta_sg doesn't have to be an array
  nvme: properly free resources for cancelled command
  nvme: simplify completion handling
  nvme: special case AEN requests
  ...
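Every hunk in the diff below follows the same pattern from that split: the LightNVM glue stops reaching through the PCIe-specific struct nvme_dev and instead goes through the transport-agnostic struct nvme_ctrl hanging off each namespace. A minimal sketch of the access-pattern change, using simplified stand-in struct layouts (the real definitions in the NVMe host code carry many more fields); only the members actually visible in the diff are shown:

struct request_queue;                   /* opaque here; real type in <linux/blkdev.h> */
struct device;                          /* opaque here; real type in <linux/device.h> */

struct nvme_ctrl {                      /* shared, transport-agnostic controller state */
        struct request_queue *admin_q;  /* admin command queue */
        struct device *dev;             /* device used for dev_err() and DMA pools */
        /* ... */
};

struct nvme_ns {
        struct nvme_ctrl *ctrl;         /* replaces the old struct nvme_dev *dev */
        /* ... */
};

/*
 * Old pattern:  struct nvme_dev *dev = ns->dev;
 *               nvme_submit_sync_cmd(dev->admin_q, cmd, buf, len);
 * New pattern:  nvme_submit_sync_cmd(ns->ctrl->admin_q, cmd, buf, len);
 */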
@@ -294,7 +294,6 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
 static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
 {
         struct nvme_ns *ns = nvmdev->q->queuedata;
-        struct nvme_dev *dev = ns->dev;
         struct nvme_nvm_id *nvme_nvm_id;
         struct nvme_nvm_command c = {};
         int ret;
@@ -307,7 +306,7 @@ static int nvme_nvm_identity(struct nvm_dev *nvmdev, struct nvm_id *nvm_id)
         if (!nvme_nvm_id)
                 return -ENOMEM;
 
-        ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                 nvme_nvm_id, sizeof(struct nvme_nvm_id));
         if (ret) {
                 ret = -EIO;
@@ -332,9 +331,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                                 nvm_l2p_update_fn *update_l2p, void *priv)
 {
         struct nvme_ns *ns = nvmdev->q->queuedata;
-        struct nvme_dev *dev = ns->dev;
         struct nvme_nvm_command c = {};
-        u32 len = queue_max_hw_sectors(dev->admin_q) << 9;
+        u32 len = queue_max_hw_sectors(ns->ctrl->admin_q) << 9;
         u32 nlb_pr_rq = len / sizeof(u64);
         u64 cmd_slba = slba;
         void *entries;
@@ -352,10 +350,10 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
                 c.l2p.slba = cpu_to_le64(cmd_slba);
                 c.l2p.nlb = cpu_to_le32(cmd_nlb);
 
-                ret = nvme_submit_sync_cmd(dev->admin_q,
+                ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
                                 (struct nvme_command *)&c, entries, len);
                 if (ret) {
-                        dev_err(dev->dev, "L2P table transfer failed (%d)\n",
+                        dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
                                                         ret);
                         ret = -EIO;
                         goto out;
@@ -381,7 +379,7 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
 {
         struct request_queue *q = nvmdev->q;
         struct nvme_ns *ns = q->queuedata;
-        struct nvme_dev *dev = ns->dev;
+        struct nvme_ctrl *ctrl = ns->ctrl;
         struct nvme_nvm_command c = {};
         struct nvme_nvm_bb_tbl *bb_tbl;
         int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks;
@@ -395,30 +393,30 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
         if (!bb_tbl)
                 return -ENOMEM;
 
-        ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+        ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
                                                                 bb_tbl, tblsz);
         if (ret) {
-                dev_err(dev->dev, "get bad block table failed (%d)\n", ret);
+                dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
                 ret = -EIO;
                 goto out;
         }
 
         if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
                 bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
-                dev_err(dev->dev, "bbt format mismatch\n");
+                dev_err(ctrl->dev, "bbt format mismatch\n");
                 ret = -EINVAL;
                 goto out;
         }
 
         if (le16_to_cpu(bb_tbl->verid) != 1) {
                 ret = -EINVAL;
-                dev_err(dev->dev, "bbt version not supported\n");
+                dev_err(ctrl->dev, "bbt version not supported\n");
                 goto out;
         }
 
         if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) {
                 ret = -EINVAL;
-                dev_err(dev->dev, "bbt unsuspected blocks returned (%u!=%u)",
+                dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
                                         le32_to_cpu(bb_tbl->tblks), nr_blocks);
                 goto out;
         }
@@ -434,7 +432,6 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
                                 int type)
 {
         struct nvme_ns *ns = nvmdev->q->queuedata;
-        struct nvme_dev *dev = ns->dev;
         struct nvme_nvm_command c = {};
         int ret = 0;
 
@@ -444,10 +441,10 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
         c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
         c.set_bb.value = type;
 
-        ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c,
+        ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
                                                                 NULL, 0);
         if (ret)
-                dev_err(dev->dev, "set bad block table failed (%d)\n", ret);
+                dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
         return ret;
 }
 
@@ -532,9 +529,8 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
 static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
 {
         struct nvme_ns *ns = nvmdev->q->queuedata;
-        struct nvme_dev *dev = ns->dev;
 
-        return dma_pool_create(name, dev->dev, PAGE_SIZE, PAGE_SIZE, 0);
+        return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
 }
 
 static void nvme_nvm_destroy_dma_pool(void *pool)
@@ -592,8 +588,9 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
 
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
-        struct nvme_dev *dev = ns->dev;
-        struct pci_dev *pdev = to_pci_dev(dev->dev);
+        struct nvme_ctrl *ctrl = ns->ctrl;
+        /* XXX: this is poking into PCI structures from generic code! */
+        struct pci_dev *pdev = to_pci_dev(ctrl->dev);
 
         /* QEMU NVMe simulator - PCI ID + Vendor specific bit */
         if (pdev->vendor == PCI_VENDOR_ID_CNEX &&