Merge tag 'for-4.12/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - A major update for DM cache that reduces the latency for deciding
   whether blocks should migrate to/from the cache. The bio-prison-v2
   interface supports this improvement by enabling direct dispatch of
   work to workqueues rather than having to delay the actual work
   dispatch to the DM cache core. So the dm-cache policies are much
   more nimble by being able to drive IO as they see fit. One immediate
   benefit from the improved latency is a cache that should be much
   more adaptive to changing workloads.

 - Add a new DM integrity target that emulates a block device that has
   additional per-sector tags that can be used for storing integrity
   information.

 - Add a new authenticated encryption feature to the DM crypt target
   that builds on the capabilities provided by the DM integrity target.

 - Add an MD interface for switching the raid4/5/6 journal mode and
   update the DM raid target to use it to enable raid4/5/6 journal
   write-back support.

 - Switch the DM verity target over to using the asynchronous hash
   crypto API (this helps work better with architectures that have
   access to off-CPU algorithm providers, which should reduce CPU
   utilization).

 - Various request-based DM and DM multipath fixes and improvements
   from Bart and Christoph.

 - A DM thinp target fix for a bio structure leak that occurs for each
   discard IFF discard passdown is enabled.

 - A fix for a possible deadlock in DM bufio and a fix to re-check the
   new buffer allocation watermark in the face of competing admin
   changes to the 'max_cache_size_bytes' tunable.

 - A couple of DM core cleanups.

* tag 'for-4.12/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (50 commits)
  dm bufio: check new buffer allocation watermark every 30 seconds
  dm bufio: avoid a possible ABBA deadlock
  dm mpath: make it easier to detect unintended I/O request flushes
  dm mpath: cleanup QUEUE_IF_NO_PATH bit manipulation by introducing assign_bit()
  dm mpath: micro-optimize the hot path relative to MPATHF_QUEUE_IF_NO_PATH
  dm: introduce enum dm_queue_mode to cleanup related code
  dm mpath: verify __pg_init_all_paths locking assumptions at runtime
  dm: verify suspend_locking assumptions at runtime
  dm block manager: remove an unused argument from dm_block_manager_create()
  dm rq: check blk_mq_register_dev() return value in dm_mq_init_request_queue()
  dm mpath: delay requeuing while path initialization is in progress
  dm mpath: avoid that path removal can trigger an infinite loop
  dm mpath: split and rename activate_path() to prepare for its expanded use
  dm ioctl: prevent stack leak in dm ioctl call
  dm integrity: use previously calculated log2 of sectors_per_block
  dm integrity: use hex2bin instead of open-coded variant
  dm crypt: replace custom implementation of hex2bin()
  dm crypt: remove obsolete references to per-CPU state
  dm verity: switch to using asynchronous hash crypto API
  dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues
  ...
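For context on the "dm: introduce enum dm_queue_mode" cleanup that the dm-table.c diff below picks up: the enum is defined in include/linux/device-mapper.h and replaces the previous plain-unsigned DM_TYPE_* values with a typed enumeration. The sketch below is reproduced for reference and is not part of this diff; the exact formatting and values in the header may differ.

/*
 * Sketch of the typed table/queue mode enumeration used throughout
 * dm-table.c below in place of "unsigned type".
 */
enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
};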
@@ -30,7 +30,7 @@
 
 struct dm_table {
 	struct mapped_device *md;
-	unsigned type;
+	enum dm_queue_mode type;
 
 	/* btree table */
 	unsigned int depth;
@@ -47,6 +47,7 @@ struct dm_table {
 	bool integrity_supported:1;
 	bool singleton:1;
 	bool all_blk_mq:1;
+	unsigned integrity_added:1;
 
 	/*
 	 * Indicates the rw permissions for the new logical
@@ -372,7 +373,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
  */
 dev_t dm_get_dev_t(const char *path)
 {
-	dev_t uninitialized_var(dev);
+	dev_t dev;
 	struct block_device *bdev;
 
 	bdev = lookup_bdev(path);
@@ -626,13 +627,13 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 
 	struct dm_target *uninitialized_var(ti);
 	struct queue_limits ti_limits;
-	unsigned i = 0;
+	unsigned i;
 
 	/*
 	 * Check each entry in the table in turn.
 	 */
-	while (i < dm_table_get_num_targets(table)) {
-		ti = dm_table_get_target(table, i++);
+	for (i = 0; i < dm_table_get_num_targets(table); i++) {
+		ti = dm_table_get_target(table, i);
 
 		blk_set_stacking_limits(&ti_limits);
@@ -725,6 +726,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
 		t->immutable_target_type = tgt->type;
 	}
 
+	if (dm_target_has_integrity(tgt->type))
+		t->integrity_added = 1;
+
 	tgt->table = t;
 	tgt->begin = start;
 	tgt->len = len;
@@ -821,19 +825,19 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
-static bool __table_type_bio_based(unsigned table_type)
+static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_BIO_BASED ||
 		table_type == DM_TYPE_DAX_BIO_BASED);
 }
 
-static bool __table_type_request_based(unsigned table_type)
+static bool __table_type_request_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_REQUEST_BASED ||
 		table_type == DM_TYPE_MQ_REQUEST_BASED);
 }
 
-void dm_table_set_type(struct dm_table *t, unsigned type)
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 {
 	t->type = type;
 }
@@ -850,11 +854,11 @@ static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_dax(struct dm_table *t)
 {
 	struct dm_target *ti;
-	unsigned i = 0;
+	unsigned i;
 
 	/* Ensure that all targets support DAX. */
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
 
 		if (!ti->type->direct_access)
 			return false;
@@ -875,7 +879,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	struct dm_target *tgt;
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
-	unsigned live_md_type = dm_get_md_type(t->md);
+	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
@@ -984,7 +988,7 @@ verify_rq_based:
 	return 0;
 }
 
-unsigned dm_table_get_type(struct dm_table *t)
+enum dm_queue_mode dm_table_get_type(struct dm_table *t)
 {
 	return t->type;
 }
@@ -1006,11 +1010,11 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
 
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
 {
-	struct dm_target *uninitialized_var(ti);
-	unsigned i = 0;
+	struct dm_target *ti;
+	unsigned i;
 
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
 		if (dm_target_is_wildcard(ti->type))
 			return ti;
 	}
@@ -1035,7 +1039,7 @@ bool dm_table_all_blk_mq_devices(struct dm_table *t)
 
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
-	unsigned type = dm_table_get_type(t);
+	enum dm_queue_mode type = dm_table_get_type(t);
 	unsigned per_io_data_size = 0;
 	struct dm_target *tgt;
 	unsigned i;
@@ -1131,6 +1135,13 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
 	struct list_head *devices = dm_table_get_devices(t);
 	struct dm_dev_internal *dd = NULL;
 	struct gendisk *prev_disk = NULL, *template_disk = NULL;
+	unsigned i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		struct dm_target *ti = dm_table_get_target(t, i);
+		if (!dm_target_passes_integrity(ti->type))
+			goto no_integrity;
+	}
 
 	list_for_each_entry(dd, devices, list) {
 		template_disk = dd->dm_dev->bdev->bd_disk;
@@ -1168,6 +1179,10 @@ static int dm_table_register_integrity(struct dm_table *t)
 	struct mapped_device *md = t->md;
 	struct gendisk *template_disk = NULL;
 
+	/* If target handles integrity itself do not register it here. */
+	if (t->integrity_added)
+		return 0;
+
 	template_disk = dm_table_get_integrity_disk(t);
 	if (!template_disk)
 		return 0;
@@ -1313,15 +1328,16 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
  */
 bool dm_table_has_no_data_devices(struct dm_table *table)
 {
-	struct dm_target *uninitialized_var(ti);
-	unsigned i = 0, num_devices = 0;
+	struct dm_target *ti;
+	unsigned i, num_devices;
 
-	while (i < dm_table_get_num_targets(table)) {
-		ti = dm_table_get_target(table, i++);
+	for (i = 0; i < dm_table_get_num_targets(table); i++) {
+		ti = dm_table_get_target(table, i);
 
 		if (!ti->type->iterate_devices)
 			return false;
 
+		num_devices = 0;
 		ti->type->iterate_devices(ti, count_device, &num_devices);
 		if (num_devices)
 			return false;
@@ -1336,16 +1352,16 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
 int dm_calculate_queue_limits(struct dm_table *table,
 			      struct queue_limits *limits)
 {
-	struct dm_target *uninitialized_var(ti);
+	struct dm_target *ti;
 	struct queue_limits ti_limits;
-	unsigned i = 0;
+	unsigned i;
 
 	blk_set_stacking_limits(limits);
 
-	while (i < dm_table_get_num_targets(table)) {
+	for (i = 0; i < dm_table_get_num_targets(table); i++) {
 		blk_set_stacking_limits(&ti_limits);
 
-		ti = dm_table_get_target(table, i++);
+		ti = dm_table_get_target(table, i);
 
 		if (!ti->type->iterate_devices)
 			goto combine_limits;
@@ -1394,6 +1410,9 @@ static void dm_table_verify_integrity(struct dm_table *t)
 {
 	struct gendisk *template_disk = NULL;
 
+	if (t->integrity_added)
+		return;
+
 	if (t->integrity_supported) {
 		/*
 		 * Verify that the original integrity profile
@@ -1424,7 +1443,7 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
 	struct dm_target *ti;
-	unsigned i = 0;
+	unsigned i;
 
 	/*
 	 * Require at least one underlying device to support flushes.
@@ -1432,8 +1451,8 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 	 * so we need to use iterate_devices here, which targets
 	 * supporting flushes must provide.
 	 */
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
 
 		if (!ti->num_flush_bios)
 			continue;
@@ -1477,10 +1496,10 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
-	unsigned i = 0;
+	unsigned i;
 
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
 
 		if (!ti->type->iterate_devices ||
 		    !ti->type->iterate_devices(ti, func, NULL))
@@ -1501,10 +1520,10 @@ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *de
 static bool dm_table_supports_write_same(struct dm_table *t)
 {
 	struct dm_target *ti;
-	unsigned i = 0;
+	unsigned i;
 
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
 
 		if (!ti->num_write_same_bios)
 			return false;
@@ -1556,7 +1575,7 @@ static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 static bool dm_table_supports_discards(struct dm_table *t)
 {
 	struct dm_target *ti;
-	unsigned i = 0;
+	unsigned i;
 
 	/*
 	 * Unless any target used by the table set discards_supported,
@@ -1565,8 +1584,8 @@ static bool dm_table_supports_discards(struct dm_table *t)
 	 * so we need to use iterate_devices here, which targets
 	 * supporting discard selectively must provide.
 	 */
-	while (i < dm_table_get_num_targets(t)) {
-		ti = dm_table_get_target(t, i++);
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
 
 		if (!ti->num_discard_bios)
 			continue;
@@ -1672,6 +1691,8 @@ static void suspend_targets(struct dm_table *t, enum suspend_mode mode)
 	int i = t->num_targets;
 	struct dm_target *ti = t->targets;
 
+	lockdep_assert_held(&t->md->suspend_lock);
+
 	while (i--) {
 		switch (mode) {
 		case PRESUSPEND:
@@ -1719,6 +1740,8 @@ int dm_table_resume_targets(struct dm_table *t)
 {
 	int i, r = 0;
 
+	lockdep_assert_held(&t->md->suspend_lock);
+
 	for (i = 0; i < t->num_targets; i++) {
 		struct dm_target *ti = t->targets + i;
 