Merge tag 'dm-4.11-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - Fix dm-raid transient device failure processing and other smaller
   tweaks.

 - Add journal support to the DM raid target to close the 'write hole'
   on raid 4/5/6.

 - Fix dm-cache corruption, due to rounding bug, when cache exceeds 2TB.

 - Add 'metadata2' feature to dm-cache to separate the dirty bitset out
   from other cache metadata. This improves speed of shutting down a
   large cache device (which implies writing out dirty bits).

 - Fix a memory leak during dm-stats data structure destruction.

 - Fix a DM multipath round-robin path selector performance regression
   that was caused by less precise balancing across all paths.

 - Lastly, introduce a DM core fix for a long-standing DM snapshot
   deadlock that is rooted in the complexity of the device stack used in
   conjunction with block core maintaining bios on current->bio_list to
   manage recursion in generic_make_request(). A more comprehensive fix
   to block core (and its hook in the cpu scheduler) would be wonderful
   but this DM-specific fix is pragmatic considering how difficult it
   has been to make progress on a generic fix.

* tag 'dm-4.11-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (22 commits)
  dm: flush queued bios when process blocks to avoid deadlock
  dm round robin: revert "use percpu 'repeat_count' and 'current_path'"
  dm stats: fix a leaked s->histogram_boundaries array
  dm space map metadata: constify dm_space_map structures
  dm cache metadata: use cursor api in blocks_are_clean_separate_dirty()
  dm persistent data: add cursor skip functions to the cursor APIs
  dm cache metadata: use dm_bitset_new() to create the dirty bitset in format 2
  dm bitset: add dm_bitset_new()
  dm cache metadata: name the cache block that couldn't be loaded
  dm cache metadata: add "metadata2" feature
  dm cache metadata: use bitset cursor api to load discard bitset
  dm bitset: introduce cursor api
  dm btree: use GFP_NOFS in dm_btree_del()
  dm space map common: memcpy the disk root to ensure it's arch aligned
  dm block manager: add unlikely() annotations on dm_bufio error paths
  dm cache: fix corruption seen when using cache > 2TB
  dm raid: cleanup awkward branching in raid_message() option processing
  dm raid: use mddev rather than rdev->mddev
  dm raid: use read_disk_sb() throughout
  dm raid: add raid4/5/6 journaling support
  ...
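The 'metadata2' cache feature described above is selected at construction time, as an optional feature argument in the cache target table that parse_features() handles in the diff below. A minimal sketch of such a table, assuming hypothetical device names and an illustrative block size and policy:

    # <start> <len> cache <metadata dev> <cache dev> <origin dev> <block size>
    #         <#feature args> [<feature arg>]* <policy> <#policy args> [<policy arg>]*
    # Device names and sizes here are hypothetical; '1 metadata2' requests the new
    # on-disk format, 'smq' is the policy with zero policy arguments.
    dmsetup create cached --table \
      "0 1048576 cache /dev/mapper/cache-meta /dev/mapper/cache-data /dev/mapper/origin 512 1 metadata2 smq 0"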
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -179,6 +179,7 @@ enum cache_io_mode {
 struct cache_features {
 	enum cache_metadata_mode mode;
 	enum cache_io_mode io_mode;
+	unsigned metadata_version;
 };
 
 struct cache_stats {
@@ -248,7 +249,7 @@ struct cache {
 	/*
 	 * Fields for converting from sectors to blocks.
 	 */
-	uint32_t sectors_per_block;
+	sector_t sectors_per_block;
 	int sectors_per_block_shift;
 
 	spinlock_t lock;
@@ -2534,13 +2535,14 @@ static void init_features(struct cache_features *cf)
 {
 	cf->mode = CM_WRITE;
 	cf->io_mode = CM_IO_WRITEBACK;
+	cf->metadata_version = 1;
 }
 
 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 			  char **error)
 {
 	static struct dm_arg _args[] = {
-		{0, 1, "Invalid number of cache feature arguments"},
+		{0, 2, "Invalid number of cache feature arguments"},
 	};
 
 	int r;
@@ -2566,6 +2568,9 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
 		else if (!strcasecmp(arg, "passthrough"))
 			cf->io_mode = CM_IO_PASSTHROUGH;
 
+		else if (!strcasecmp(arg, "metadata2"))
+			cf->metadata_version = 2;
+
 		else {
 			*error = "Unrecognised cache feature requested";
 			return -EINVAL;
@@ -2820,7 +2825,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
 				     ca->block_size, may_format,
-				     dm_cache_policy_get_hint_size(cache->policy));
+				     dm_cache_policy_get_hint_size(cache->policy),
+				     ca->features.metadata_version);
 	if (IS_ERR(cmd)) {
 		*error = "Error creating metadata object";
 		r = PTR_ERR(cmd);
@@ -3165,21 +3171,16 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 
 static int write_dirty_bitset(struct cache *cache)
 {
-	unsigned i, r;
+	int r;
 
 	if (get_cache_mode(cache) >= CM_READ_ONLY)
 		return -EINVAL;
 
-	for (i = 0; i < from_cblock(cache->cache_size); i++) {
-		r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
-				       is_dirty(cache, to_cblock(i)));
-		if (r) {
-			metadata_operation_failed(cache, "dm_cache_set_dirty", r);
-			return r;
-		}
-	}
+	r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset);
+	if (r)
+		metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r);
 
-	return 0;
+	return r;
 }
 
 static int write_discard_bitset(struct cache *cache)
@@ -3540,11 +3541,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 
 		residency = policy_residency(cache->policy);
 
-		DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
+		DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
 		       (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
 		       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
 		       (unsigned long long)nr_blocks_metadata,
-		       cache->sectors_per_block,
+		       (unsigned long long)cache->sectors_per_block,
 		       (unsigned long long) from_cblock(residency),
 		       (unsigned long long) from_cblock(cache->cache_size),
 		       (unsigned) atomic_read(&cache->stats.read_hit),
@@ -3555,14 +3556,19 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 		       (unsigned) atomic_read(&cache->stats.promotion),
 		       (unsigned long) atomic_read(&cache->nr_dirty));
 
+		if (cache->features.metadata_version == 2)
+			DMEMIT("2 metadata2 ");
+		else
+			DMEMIT("1 ");
+
 		if (writethrough_mode(&cache->features))
-			DMEMIT("1 writethrough ");
+			DMEMIT("writethrough ");
 
 		else if (passthrough_mode(&cache->features))
-			DMEMIT("1 passthrough ");
+			DMEMIT("passthrough ");
 
 		else if (writeback_mode(&cache->features))
-			DMEMIT("1 writeback ");
+			DMEMIT("writeback ");
 
 		else {
 			DMERR("%s: internal error: unknown io mode: %d",
@@ -3810,7 +3816,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
 	.name = "cache",
-	.version = {1, 9, 0},
+	.version = {1, 10, 0},
 	.module = THIS_MODULE,
 	.ctr = cache_ctr,
 	.dtr = cache_dtr,