Merge tag 'for-5.9/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:

 - DM multipath locking fixes around m->flags tests and improvements to
   bio-based code so that it follows patterns established by
   request-based code.

 - Request-based DM core improvement to eliminate unnecessary call to
   blk_mq_queue_stopped().

 - Add "panic_on_corruption" error handling mode to DM verity target.

 - DM bufio fix to perform buffer cleanup from a workqueue rather than
   wait for IO in reclaim context from a shrinker.

 - DM crypt improvement to optionally avoid async processing via
   workqueues for reads and/or writes -- via "no_read_workqueue" and
   "no_write_workqueue" features. This more direct IO processing
   improves latency and throughput with faster storage. Avoiding
   workqueue IO submission for writes (DM_CRYPT_NO_WRITE_WORKQUEUE) is
   a requirement for adding zoned block device support to DM crypt.

 - Add zoned block device support to DM crypt. Makes use of
   DM_CRYPT_NO_WRITE_WORKQUEUE and a new optional feature
   (DM_CRYPT_WRITE_INLINE) that allows write completion to wait for
   encryption to complete. This allows write ordering to be preserved,
   which is needed for zoned block devices.

 - Fix DM ebs target's check for REQ_OP_FLUSH.

 - Fix DM core's report zones support to not report more zones than
   were requested.

 - A few small compiler warning fixes.

 - DM dust improvements to return output directly to the user rather
   than require they scrape the system log for output.

* tag 'for-5.9/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: don't call report zones for more than the user requested
  dm ebs: Fix incorrect checking for REQ_OP_FLUSH
  dm init: Set file local variable static
  dm ioctl: Fix compilation warning
  dm raid: Remove empty if statement
  dm verity: Fix compilation warning
  dm crypt: Enable zoned block device support
  dm crypt: add flags to optionally bypass kcryptd workqueues
  dm bufio: do buffer cleanup from a workqueue
  dm rq: don't call blk_mq_queue_stopped() in dm_stop_queue()
  dm dust: add interface to list all badblocks
  dm dust: report some message results directly back to user
  dm verity: add "panic_on_corruption" error handling mode
  dm mpath: use double checked locking in fast path
  dm mpath: rename current_pgpath to pgpath in multipath_prepare_ioctl
  dm mpath: rework __map_bio()
  dm mpath: factor out multipath_queue_bio
  dm mpath: push locking down to must_push_back_rq()
  dm mpath: take m->lock spinlock when testing QUEUE_IF_NO_PATH
  dm mpath: changes from initial m->flags locking audit
@@ -69,6 +69,7 @@ struct dm_crypt_io {
 	u8 *integrity_metadata;
 	bool integrity_metadata_from_pool;
 	struct work_struct work;
+	struct tasklet_struct tasklet;
 
 	struct convert_context ctx;
 
@@ -127,7 +128,9 @@ struct iv_elephant_private {
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
-	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+	     DM_CRYPT_NO_READ_WORKQUEUE, DM_CRYPT_NO_WRITE_WORKQUEUE,
+	     DM_CRYPT_WRITE_INLINE };
 
 enum cipher_flags {
 	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
@@ -1523,7 +1526,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static blk_status_t crypt_convert(struct crypt_config *cc,
-			  struct convert_context *ctx)
+			  struct convert_context *ctx, bool atomic)
 {
 	unsigned int tag_offset = 0;
 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
@@ -1566,7 +1569,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 			atomic_dec(&ctx->cc_pending);
 			ctx->cc_sector += sector_step;
 			tag_offset++;
-			cond_resched();
+			if (!atomic)
+				cond_resched();
 			continue;
 		/*
 		 * There was a data integrity error.
@@ -1892,7 +1896,8 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 
 	clone->bi_iter.bi_sector = cc->start + io->sector;
 
-	if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
+	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
+	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
 		submit_bio_noacct(clone);
 		return;
 	}
@@ -1915,9 +1920,32 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	spin_unlock_irqrestore(&cc->write_thread_lock, flags);
 }
 
+static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
+				       struct convert_context *ctx)
+
+{
+	if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
+		return false;
+
+	/*
+	 * Note: zone append writes (REQ_OP_ZONE_APPEND) do not have ordering
+	 * constraints so they do not need to be issued inline by
+	 * kcryptd_crypt_write_convert().
+	 */
+	switch (bio_op(ctx->bio_in)) {
+	case REQ_OP_WRITE:
+	case REQ_OP_WRITE_SAME:
+	case REQ_OP_WRITE_ZEROES:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
+	struct convert_context *ctx = &io->ctx;
 	struct bio *clone;
 	int crypt_finished;
 	sector_t sector = io->sector;
@@ -1927,7 +1955,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	 * Prevent io from disappearing until this function completes.
 	 */
 	crypt_inc_pending(io);
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
 
 	clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
 	if (unlikely(!clone)) {
@@ -1941,10 +1969,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	sector += bio_sectors(clone);
 
 	crypt_inc_pending(io);
-	r = crypt_convert(cc, &io->ctx);
+	r = crypt_convert(cc, ctx,
+			  test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
 	if (r)
 		io->error = r;
-	crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
+	crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+	if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+		/* Wait for completion signaled by kcryptd_async_done() */
+		wait_for_completion(&ctx->restart);
+		crypt_finished = 1;
+	}
 
 	/* Encryption was already finished, submit io now */
 	if (crypt_finished) {
@@ -1971,7 +2005,8 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
 			   io->sector);
 
-	r = crypt_convert(cc, &io->ctx);
+	r = crypt_convert(cc, &io->ctx,
+			  test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
 	if (r)
 		io->error = r;
 
@@ -2015,10 +2050,21 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (!atomic_dec_and_test(&ctx->cc_pending))
 		return;
 
-	if (bio_data_dir(io->base_bio) == READ)
+	/*
+	 * The request is fully completed: for inline writes, let
+	 * kcryptd_crypt_write_convert() do the IO submission.
+	 */
+	if (bio_data_dir(io->base_bio) == READ) {
 		kcryptd_crypt_read_done(io);
-	else
-		kcryptd_crypt_write_io_submit(io, 1);
+		return;
+	}
+
+	if (kcryptd_crypt_write_inline(cc, ctx)) {
+		complete(&ctx->restart);
+		return;
+	}
+
+	kcryptd_crypt_write_io_submit(io, 1);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
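Taken together, the previous two hunks form the inline-write handshake: kcryptd_crypt_write_convert() drops its reference to the in-flight conversion and, if encryption is still pending, blocks on ctx->restart until the final kcryptd_async_done() callback signals it, so the bio is issued by the submitter in submission order. A minimal standalone sketch of the same pattern, with hypothetical demo_* names (not the dm-crypt code itself):

  #include <linux/atomic.h>
  #include <linux/completion.h>

  struct demo_ctx {
          atomic_t pending;          /* in-flight requests, +1 for the submitter */
          struct completion restart; /* signalled when the last request finishes */
  };

  static void demo_init(struct demo_ctx *ctx, int nr_requests)
  {
          atomic_set(&ctx->pending, nr_requests + 1);
          init_completion(&ctx->restart);
  }

  /* Submitter side, as in kcryptd_crypt_write_convert(): drop our reference
   * and, if the crypto API is completing requests asynchronously, wait. */
  static void demo_submit_inline(struct demo_ctx *ctx)
  {
          if (!atomic_dec_and_test(&ctx->pending))
                  wait_for_completion(&ctx->restart);
          /* all data is now encrypted; issue the write here, in order */
  }

  /* Async side, as in kcryptd_async_done(): the last completed request
   * wakes the waiting submitter instead of submitting the IO itself. */
  static void demo_async_done(struct demo_ctx *ctx)
  {
          if (atomic_dec_and_test(&ctx->pending))
                  complete(&ctx->restart);
  }

The design point is that with DM_CRYPT_WRITE_INLINE set, IO submission happens on the submitter's side of the handshake, which is what preserves write ordering on zoned devices.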
@@ -2031,10 +2077,28 @@ static void kcryptd_crypt(struct work_struct *work)
 		kcryptd_crypt_write_convert(io);
 }
 
+static void kcryptd_crypt_tasklet(unsigned long work)
+{
+	kcryptd_crypt((struct work_struct *)work);
+}
+
 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 {
 	struct crypt_config *cc = io->cc;
 
+	if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
+	    (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
+		if (in_irq()) {
+			/* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+			tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
+			tasklet_schedule(&io->tasklet);
+			return;
+		}
+
+		kcryptd_crypt(&io->work);
+		return;
+	}
+
 	INIT_WORK(&io->work, kcryptd_crypt);
 	queue_work(cc->crypt_queue, &io->work);
 }
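The in_irq() test above exists because the Crypto API's synchronous skcipher path cannot run in hard interrupt context: a completion arriving from hard IRQ is bounced to a tasklet, which runs in softirq context where synchronous encryption is allowed. A condensed sketch of that dispatch, again with hypothetical demo_* names (in_irq() and the tasklet API shown are the 5.9-era interfaces):

  #include <linux/interrupt.h>
  #include <linux/workqueue.h>

  struct demo_io {
          struct work_struct work;
          struct tasklet_struct tasklet;
  };

  static void demo_process(struct work_struct *work)
  {
          /* synchronous encryption would run here */
  }

  static void demo_tasklet_fn(unsigned long data)
  {
          /* tasklet callbacks take an unsigned long, so cast back */
          demo_process((struct work_struct *)data);
  }

  static void demo_dispatch(struct demo_io *io)
  {
          if (in_irq()) {
                  /* hard IRQ: defer to softirq via a tasklet */
                  tasklet_init(&io->tasklet, demo_tasklet_fn,
                               (unsigned long)&io->work);
                  tasklet_schedule(&io->tasklet);
                  return;
          }
          /* process or softirq context: run synchronously */
          demo_process(&io->work);
  }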
@@ -2838,7 +2902,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 	struct crypt_config *cc = ti->private;
 	struct dm_arg_set as;
 	static const struct dm_arg _args[] = {
-		{0, 6, "Invalid number of feature args"},
+		{0, 8, "Invalid number of feature args"},
 	};
 	unsigned int opt_params, val;
 	const char *opt_string, *sval;
@@ -2868,6 +2932,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 
 		else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
 			set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+		else if (!strcasecmp(opt_string, "no_read_workqueue"))
+			set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
+		else if (!strcasecmp(opt_string, "no_write_workqueue"))
+			set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
 		else if (sscanf(opt_string, "integrity:%u:", &val) == 1) {
 			if (val == 0 || val > MAX_TAG_SIZE) {
 				ti->error = "Invalid integrity arguments";
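For context, the two new strings are optional feature args on a crypt table line. A hedged usage sketch via libdevmapper (device path, length, and the all-zero placeholder key are made up; never use a zero key for real data):

  #include <libdevmapper.h>
  #include <stdio.h>

  int main(void)
  {
          /* 256-bit all-zero XTS key: placeholder only */
          const char *params =
                  "aes-xts-plain64 "
                  "0000000000000000000000000000000000000000000000000000000000000000 "
                  "0 /dev/sdb 0 2 no_read_workqueue no_write_workqueue";
          struct dm_task *dmt = dm_task_create(DM_DEVICE_CREATE);

          if (!dmt)
                  return 1;
          /* 2097152 sectors = 1 GiB target, starting at sector 0 */
          if (!dm_task_set_name(dmt, "crypt0") ||
              !dm_task_add_target(dmt, 0, 2097152, "crypt", params) ||
              !dm_task_run(dmt)) {
                  fprintf(stderr, "creating dm-crypt device failed\n");
                  dm_task_destroy(dmt);
                  return 1;
          }
          dm_task_destroy(dmt);
          return 0;
  }

The equivalent "dmsetup create crypt0" table line is the params string above prefixed with the start sector and length; the trailing "2" is the feature-arg count that the {0, 8, ...} bound in crypt_ctr_optional() validates.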
@@ -2908,6 +2976,21 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 	return 0;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
+
+static int crypt_report_zones(struct dm_target *ti,
+		struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+	struct crypt_config *cc = ti->private;
+	sector_t sector = cc->start + dm_target_offset(ti, args->next_sector);
+
+	args->start = cc->start;
+	return blkdev_report_zones(cc->dev->bdev, sector, nr_zones,
+				   dm_report_zones_cb, args);
+}
+
+#endif
+
 /*
  * Construct an encryption mapping:
  * <cipher> [<key>|:<key_size>:<user|logon>:<key_description>] <iv_offset> <dev_path> <start>
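To make the remapping in crypt_report_zones() concrete (all numbers made up): dm_target_offset(ti, s) is s - ti->begin, so with ti->begin = 0 and cc->start = 524288 (the crypt target begins 256 MiB into the underlying device), a zone report requested at dm sector 1048576 is issued to the underlying device at sector 524288 + 1048576 = 1572864. Setting args->start = cc->start is what lets dm_report_zones_cb() translate the returned zone start and write-pointer positions back into the dm device's sector space.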
@@ -3041,6 +3124,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	}
 	cc->start = tmpll;
 
+	/*
+	 * For zoned block devices, we need to preserve the issuer write
+	 * ordering. To do so, disable write workqueues and force inline
+	 * encryption completion.
+	 */
+	if (bdev_is_zoned(cc->dev->bdev)) {
+		set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
+		set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
+	}
+
 	if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
 		ret = crypt_integrity_ctr(cc, ti);
 		if (ret)
@@ -3196,6 +3289,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		num_feature_args += !!ti->num_discard_bios;
 		num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
 		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+		num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
+		num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
 		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
 		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
 		if (cc->on_disk_tag_size)
@@ -3208,6 +3303,10 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 			DMEMIT(" same_cpu_crypt");
 		if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
 			DMEMIT(" submit_from_crypt_cpus");
+		if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
+			DMEMIT(" no_read_workqueue");
+		if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
+			DMEMIT(" no_write_workqueue");
 		if (cc->on_disk_tag_size)
 			DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
 		if (cc->sector_size != (1 << SECTOR_SHIFT))
@@ -3320,10 +3419,14 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 21, 0},
+	.version = {1, 22, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
+#ifdef CONFIG_BLK_DEV_ZONED
+	.features = DM_TARGET_ZONED_HM,
+	.report_zones = crypt_report_zones,
+#endif
 	.map    = crypt_map,
 	.status = crypt_status,
 	.postsuspend = crypt_postsuspend,