Merge tag 'for-linus-20180608' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A few fixes for this merge window, where some of them should go in
  sooner rather than later, hence a new pull this week. This pull
  request contains:

   - Set of NVMe fixes, mostly follow up cleanups/fixes to the queue
     changes, but also teardown/removal and misc changes (Christoph/Dan/
     Johannes/Sagi/Steve).

   - Two lightnvm fixes for issues that showed up in this window
     (Colin/Wei).

   - Failfast/driver flags inheritance for flush requests (Hannes).

   - The md device put sanitization and fix (Kent).

   - dm bio_set inheritance fix (me).

   - nbd discard granularity fix (Josef).

   - nbd consistency in command printing (Kevin).

   - Loop recursion validation fix (Ted).

   - Partition overlap check (Wang)"

[ .. and now my build is warning-free again thanks to the md fix  - Linus ]

* tag 'for-linus-20180608' of git://git.kernel.dk/linux-block: (22 commits)
  nvme: cleanup double shift issue
  nvme-pci: make CMB SQ mod-param read-only
  nvme-pci: unquiesce dead controller queues
  nvme-pci: remove HMB teardown on reset
  nvme-pci: queue creation fixes
  nvme-pci: remove unnecessary completion doorbell check
  nvme-pci: remove unnecessary nested locking
  nvmet: filter newlines from user input
  nvme-rdma: correctly check for target keyed sgl support
  nvme: don't hold nvmf_transports_rwsem for more than transport lookups
  nvmet: return all zeroed buffer when we can't find an active namespace
  md: Unify mddev destruction paths
  dm: use bioset_init_from_src() to copy bio_set
  block: add bioset_init_from_src() helper
  block: always set partition number to '0' in blk_partition_remap()
  block: pass failfast and driver-specific flags to flush requests
  nbd: set discard_alignment to the granularity
  nbd: Consistently use request pointer in debug messages.
  block: add verifier for cmdline partition
  lightnvm: pblk: fix resource leak of invalid_bitmap
  ...
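Two of the entries above belong together: "block: add bioset_init_from_src() helper" introduces the helper, and the dm hunk below switches __bind_mempools() over to it instead of structure-copying bio_sets. A minimal sketch of the pattern, assuming a caller that owns an uninitialized destination bio_set; the function name here is illustrative, not kernel code:

#include <linux/bio.h>

/*
 * Illustrative sketch only: initialize 'dst' with the same parameters as
 * 'src', as the dm change below does for md->bs and md->io_bs.
 * clone_bioset_settings() is a hypothetical helper, not a kernel function.
 */
static int clone_bioset_settings(struct bio_set *dst, struct bio_set *src)
{
        int ret;

        /* 'dst' must not already be initialized */
        ret = bioset_init_from_src(dst, src);
        if (ret)
                return ret;             /* typically -ENOMEM */

        /* bios can now be allocated from 'dst' via bio_alloc_bioset() */
        return 0;
}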
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1953,9 +1953,10 @@ static void free_dev(struct mapped_device *md)
         kvfree(md);
 }
 
-static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
+static int __bind_mempools(struct mapped_device *md, struct dm_table *t)
 {
         struct dm_md_mempools *p = dm_table_get_md_mempools(t);
+        int ret = 0;
 
         if (dm_table_bio_based(t)) {
                 /*
@@ -1982,13 +1983,16 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
                bioset_initialized(&md->bs) ||
                bioset_initialized(&md->io_bs));
 
-        md->bs = p->bs;
-        memset(&p->bs, 0, sizeof(p->bs));
-        md->io_bs = p->io_bs;
-        memset(&p->io_bs, 0, sizeof(p->io_bs));
+        ret = bioset_init_from_src(&md->bs, &p->bs);
+        if (ret)
+                goto out;
+        ret = bioset_init_from_src(&md->io_bs, &p->io_bs);
+        if (ret)
+                bioset_exit(&md->bs);
 out:
         /* mempool bind completed, no longer need any mempools in the table */
         dm_table_free_md_mempools(t);
+        return ret;
 }
 
 /*
@@ -2033,6 +2037,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
         struct request_queue *q = md->queue;
         bool request_based = dm_table_request_based(t);
         sector_t size;
+        int ret;
 
         lockdep_assert_held(&md->suspend_lock);
 
@@ -2068,7 +2073,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
                 md->immutable_target = dm_table_get_immutable_target(t);
         }
 
-        __bind_mempools(md, t);
+        ret = __bind_mempools(md, t);
+        if (ret) {
+                old_map = ERR_PTR(ret);
+                goto out;
+        }
 
         old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
         rcu_assign_pointer(md->map, (void *)t);
@@ -2078,6 +2087,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
         if (old_map)
                 dm_sync_table(md);
 
+out:
         return old_map;
 }
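With __bind_mempools() now able to fail, __bind() above reports the failure through its return value using the usual ERR_PTR() encoding instead of always handing back a table pointer. A minimal sketch of how a caller might unpack that result; the caller name is hypothetical and reference handling of the old table is omitted:

#include <linux/err.h>

/*
 * Sketch of the ERR_PTR() convention used by __bind() above;
 * bind_new_table() is an illustrative name, not the dm code.
 */
static int bind_new_table(struct mapped_device *md, struct dm_table *t,
                          struct queue_limits *limits)
{
        struct dm_table *old_map = __bind(md, t, limits);

        if (IS_ERR(old_map))
                return PTR_ERR(old_map); /* e.g. -ENOMEM from __bind_mempools() */

        /* old_map (possibly NULL) is the table that was previously bound */
        return 0;
}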
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -84,6 +84,8 @@ static void autostart_arrays(int part);
 static LIST_HEAD(pers_list);
 static DEFINE_SPINLOCK(pers_lock);
 
+static struct kobj_type md_ktype;
+
 struct md_cluster_operations *md_cluster_ops;
 EXPORT_SYMBOL(md_cluster_ops);
 struct module *md_cluster_mod;
@@ -510,11 +512,6 @@ static void mddev_delayed_delete(struct work_struct *ws);
 
 static void mddev_put(struct mddev *mddev)
 {
-        struct bio_set bs, sync_bs;
-
-        memset(&bs, 0, sizeof(bs));
-        memset(&sync_bs, 0, sizeof(sync_bs));
-
         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                 return;
         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
@@ -522,30 +519,23 @@ static void mddev_put(struct mddev *mddev)
                 /* Array is not configured at all, and not held active,
                  * so destroy it */
                 list_del_init(&mddev->all_mddevs);
-                bs = mddev->bio_set;
-                sync_bs = mddev->sync_set;
-                memset(&mddev->bio_set, 0, sizeof(mddev->bio_set));
-                memset(&mddev->sync_set, 0, sizeof(mddev->sync_set));
-                if (mddev->gendisk) {
-                        /* We did a probe so need to clean up. Call
-                         * queue_work inside the spinlock so that
-                         * flush_workqueue() after mddev_find will
-                         * succeed in waiting for the work to be done.
-                         */
-                        INIT_WORK(&mddev->del_work, mddev_delayed_delete);
-                        queue_work(md_misc_wq, &mddev->del_work);
-                } else
-                        kfree(mddev);
+
+                /*
+                 * Call queue_work inside the spinlock so that
+                 * flush_workqueue() after mddev_find will succeed in waiting
+                 * for the work to be done.
+                 */
+                INIT_WORK(&mddev->del_work, mddev_delayed_delete);
+                queue_work(md_misc_wq, &mddev->del_work);
         }
         spin_unlock(&all_mddevs_lock);
-        bioset_exit(&bs);
-        bioset_exit(&sync_bs);
 }
 
 static void md_safemode_timeout(struct timer_list *t);
 
 void mddev_init(struct mddev *mddev)
 {
+        kobject_init(&mddev->kobj, &md_ktype);
         mutex_init(&mddev->open_mutex);
         mutex_init(&mddev->reconfig_mutex);
         mutex_init(&mddev->bitmap_info.mutex);
@@ -5215,6 +5205,8 @@ static void md_free(struct kobject *ko)
                 put_disk(mddev->gendisk);
         percpu_ref_exit(&mddev->writes_pending);
 
+        bioset_exit(&mddev->bio_set);
+        bioset_exit(&mddev->sync_set);
         kfree(mddev);
 }
 
@@ -5348,8 +5340,7 @@ static int md_alloc(dev_t dev, char *name)
         mutex_lock(&mddev->open_mutex);
         add_disk(disk);
 
-        error = kobject_init_and_add(&mddev->kobj, &md_ktype,
-                                     &disk_to_dev(disk)->kobj, "%s", "md");
+        error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
         if (error) {
                 /* This isn't possible, but as kobject_init_and_add is marked
                  * __must_check, we must do something with the result
@@ -5506,7 +5497,7 @@ int md_run(struct mddev *mddev)
         if (!bioset_initialized(&mddev->sync_set)) {
                 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
                 if (err)
-                        goto abort;
+                        return err;
         }
 
         spin_lock(&pers_lock);
@@ -5519,8 +5510,7 @@ int md_run(struct mddev *mddev)
                 else
                         pr_warn("md: personality for level %s is not loaded!\n",
                                 mddev->clevel);
-                err = -EINVAL;
-                goto abort;
+                return -EINVAL;
         }
         spin_unlock(&pers_lock);
         if (mddev->level != pers->level) {
@@ -5533,8 +5523,7 @@ int md_run(struct mddev *mddev)
             pers->start_reshape == NULL) {
                 /* This personality cannot handle reshaping... */
                 module_put(pers->owner);
-                err = -EINVAL;
-                goto abort;
+                return -EINVAL;
         }
 
         if (pers->sync_request) {
@@ -5603,7 +5592,7 @@ int md_run(struct mddev *mddev)
                 mddev->private = NULL;
                 module_put(pers->owner);
                 bitmap_destroy(mddev);
-                goto abort;
+                return err;
         }
         if (mddev->queue) {
                 bool nonrot = true;
@@ -5665,12 +5654,6 @@ int md_run(struct mddev *mddev)
         sysfs_notify_dirent_safe(mddev->sysfs_action);
         sysfs_notify(&mddev->kobj, NULL, "degraded");
         return 0;
-
-abort:
-        bioset_exit(&mddev->bio_set);
-        bioset_exit(&mddev->sync_set);
-
-        return err;
 }
 EXPORT_SYMBOL_GPL(md_run);
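The md.c changes above converge every mddev teardown on the kobject release callback: kobject_init() moves into mddev_init(), md_alloc() only does kobject_add(), the kfree()/abort shortcuts go away, and md_free() picks up the bioset_exit() calls. A minimal generic sketch of that lifecycle, with illustrative names rather than the md code itself:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/slab.h>

struct my_dev {
        struct kobject kobj;
        /* device state ... */
};

/* Single destruction path: runs when the last reference goes away. */
static void my_dev_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct my_dev, kobj));
}

static struct kobj_type my_dev_ktype = {
        .release = my_dev_release,
};

static struct my_dev *my_dev_alloc(void)
{
        struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (dev)
                kobject_init(&dev->kobj, &my_dev_ktype); /* refcount starts at 1 */
        return dev;
}

/*
 * Later, kobject_add(&dev->kobj, parent, "name") publishes the object, and
 * every teardown path simply calls kobject_put(&dev->kobj); the release
 * callback frees the object exactly once, whichever path drops the last ref.
 */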