Merge tag 'v5.0-rc6' into for-5.1/block
Pull in 5.0-rc6 to avoid a dumb merge conflict with fs/iomap.c. This is
needed since io_uring is now based on the block branch, to avoid a
conflict between the multi-page bvecs and the bits of io_uring that
touch the core block parts.

* tag 'v5.0-rc6': (525 commits)
  Linux 5.0-rc6
  x86/mm: Make set_pmd_at() paravirt aware
  MAINTAINERS: Update the ocores i2c bus driver maintainer, etc
  blk-mq: remove duplicated definition of blk_mq_freeze_queue
  Blk-iolatency: warn on negative inflight IO counter
  blk-iolatency: fix IO hang due to negative inflight counter
  MAINTAINERS: unify reference to xen-devel list
  x86/mm/cpa: Fix set_mce_nospec()
  futex: Handle early deadlock return correctly
  futex: Fix barrier comment
  net: dsa: b53: Fix for failure when irq is not defined in dt
  blktrace: Show requests without sector
  mips: cm: reprime error cause
  mips: loongson64: remove unreachable(), fix loongson_poweroff().
  sit: check if IPv6 enabled before calling ip6_err_gen_icmpv6_unreach()
  geneve: should not call rt6_lookup() when ipv6 was disabled
  KVM: nVMX: unconditionally cancel preemption timer in free_nested (CVE-2019-7221)
  KVM: x86: work around leak of uninitialized stack contents (CVE-2019-7222)
  kvm: fix kvm_ioctl_create_device() reference counting (CVE-2019-6974)
  signal: Better detection of synchronous signals
  ...
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
 static void rq_completed(struct mapped_device *md)
 {
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 
 	/*
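Note: this hunk, and the identical one in dm.c below, swaps waitqueue_active() for wq_has_sleeper(). The two differ by a full memory barrier: wq_has_sleeper() issues smp_mb() before inspecting the wait list, ordering the waker's prior stores against its check for sleepers. A minimal user-space sketch of the race being closed; toy_waker, done and nwaiters are illustrative names, not kernel symbols:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool done;     /* condition the waiter checks before sleeping */
static atomic_int  nwaiters; /* stand-in for the waitqueue's task list      */

static void toy_waker(void)
{
	atomic_store_explicit(&done, true, memory_order_relaxed);
	/*
	 * Full barrier, as in wq_has_sleeper(): order the 'done' store
	 * against the load of nwaiters below. Without it (the bare
	 * waitqueue_active() pattern), a waiter that has just bumped
	 * nwaiters but not yet re-checked 'done' can be missed, and the
	 * wakeup is lost.
	 */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load_explicit(&nwaiters, memory_order_relaxed) > 0)
		;	/* wake_up(&md->wait) would go here */
}

int main(void)
{
	toy_waker();
	return 0;
}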
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
 				    true, duration, &io->stats_aux);
 
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 }
 
@@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 			return r;
 	}
 
-	bio_trim(clone, sector - clone->bi_iter.bi_sector, len);
+	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+	clone->bi_iter.bi_size = to_bytes(len);
+
+	if (bio_integrity(bio))
+		bio_integrity_trim(clone);
 
 	return 0;
 }
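Note: clone_bio() replaces bio_trim() with an explicit bio_advance() plus a direct bi_iter.bi_size assignment, re-applying by hand the integrity trim that bio_trim() performed internally. The underlying arithmetic is plain 512-byte-sector bookkeeping; a runnable toy model of it (toy_bio and toy_trim are illustrative, not kernel types):

#include <assert.h>
#include <stddef.h>

#define SECTOR_SHIFT 9			/* 512-byte sectors */

static size_t to_bytes(unsigned long sectors)
{
	return (size_t)sectors << SECTOR_SHIFT;
}

struct toy_bio {
	unsigned long bi_sector;	/* first sector covered */
	size_t        bi_size;		/* bytes remaining      */
};

static void toy_trim(struct toy_bio *clone, unsigned long sector,
		     unsigned long len)
{
	/* bio_advance() equivalent: skip forward to the target sector. */
	clone->bi_size  -= to_bytes(sector - clone->bi_sector);
	clone->bi_sector = sector;
	/* Then cap the size, as clone_bio() now does directly. */
	clone->bi_size = to_bytes(len);
}

int main(void)
{
	struct toy_bio b = { .bi_sector = 100, .bi_size = to_bytes(32) };

	toy_trim(&b, 104, 8);	/* keep 8 sectors starting at sector 104 */
	assert(b.bi_sector == 104 && b.bi_size == to_bytes(8));
	return 0;
}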
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+		struct r5conf *conf,
+		sector_t stripe_sect,
+		int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 						stripe_sect);
 
 		if (!sh) {
-			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 			/*
 			 * cannot get stripe from raid5_get_active_stripe
 			 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 				r5c_recovery_replay_stripes(
 					cached_stripe_list, ctx);
 				sh = r5c_recovery_alloc_stripe(
-					conf, stripe_sect);
+					conf, stripe_sect, 1);
 			}
 			if (!sh) {
+				int new_size = conf->min_nr_stripes * 2;
 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 					mdname(mddev),
-					conf->min_nr_stripes * 2);
-				raid5_set_cache_size(mddev,
-						     conf->min_nr_stripes * 2);
-				sh = r5c_recovery_alloc_stripe(conf,
-							       stripe_sect);
+					new_size);
+				ret = raid5_set_cache_size(mddev, new_size);
+				if (conf->min_nr_stripes <= new_size / 2) {
+					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+						mdname(mddev),
+						ret,
+						new_size,
+						conf->min_nr_stripes,
+						conf->max_nr_stripes);
+					return -ENOMEM;
+				}
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, 0);
 			}
 			if (!sh) {
 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
-					mdname(mddev));
+				       mdname(mddev));
 				return -ENOMEM;
 			}
 			list_add_tail(&sh->lru, cached_stripe_list);
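Note: the raid5-cache.c hunks above implement an escalation ladder for stripe allocation during journal recovery: try a non-blocking allocation (so recovery cannot block on a cache that only it can drain), replay cached stripes and retry, double the cache and retry, and only block as a last resort. A compressed, compilable sketch of that ladder; toy_cache, try_alloc_stripe and set_cache_size are illustrative stand-ins, not the kernel's symbols, and the stubs trivialize the real behaviour:

#include <assert.h>
#include <errno.h>
#include <stddef.h>

struct toy_cache { int min_nr, max_nr; };

/* Stubs: allocation always fails, growing always "succeeds". */
static void *try_alloc_stripe(struct toy_cache *c, int noblock)
{
	(void)c; (void)noblock;
	return NULL;
}

static int set_cache_size(struct toy_cache *c, int size)
{
	c->min_nr = c->max_nr = size;
	return 0;
}

static int recover_one(struct toy_cache *c)
{
	void *sh = try_alloc_stripe(c, 1);	/* 1: don't block */

	if (!sh)
		/* replay cached stripes to free entries, then retry */
		sh = try_alloc_stripe(c, 1);
	if (!sh) {
		int new_size = c->min_nr * 2;
		int ret = set_cache_size(c, new_size);

		/*
		 * Partial growth may still be enough; bail out only if
		 * the cache failed to pass the halfway mark, mirroring
		 * the min_nr_stripes <= new_size / 2 test above.
		 */
		if (c->min_nr <= new_size / 2) {
			(void)ret;	/* the kernel logs ret here */
			return -ENOMEM;
		}
		sh = try_alloc_stripe(c, 0);	/* 0: may block now */
	}
	return sh ? 0 : -ENOMEM;
}

int main(void)
{
	struct toy_cache c = { .min_nr = 256, .max_nr = 256 };

	assert(recover_one(&c) == -ENOMEM);	/* stub alloc never succeeds */
	return 0;
}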
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
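Note: the raid5.c hunks change raid5_set_cache_size() from always returning 0 into a function that reports -ENOMEM and clamps the requested floor back to what was actually allocated; the recovery path's min_nr_stripes <= new_size / 2 check depends on exactly that rollback. A self-contained model of the new grow loop, assuming (as in the surrounding kernel code, not shown in these hunks) that min_nr_stripes is set to the requested size before growing; toy_conf and grow_one() are illustrative stand-ins for r5conf and grow_one_stripe():

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

struct toy_conf { int min_nr_stripes, max_nr_stripes; };

/* Stub: pretend allocations start failing once the cache holds 300. */
static bool grow_one(struct toy_conf *c)
{
	if (c->max_nr_stripes >= 300)
		return false;
	c->max_nr_stripes++;
	return true;
}

static int toy_set_cache_size(struct toy_conf *c, int size)
{
	int result = 0;

	c->min_nr_stripes = size;	/* optimistic new floor */
	while (size > c->max_nr_stripes)
		if (!grow_one(c)) {
			/*
			 * Roll the floor back to what really exists so
			 * callers can detect the shortfall.
			 */
			c->min_nr_stripes = c->max_nr_stripes;
			result = -ENOMEM;
			break;
		}
	return result;	/* the pre-fix code returned 0 unconditionally */
}

int main(void)
{
	struct toy_conf c = { .min_nr_stripes = 256, .max_nr_stripes = 256 };

	assert(toy_set_cache_size(&c, 512) == -ENOMEM);
	assert(c.min_nr_stripes == 300);	/* clamped to reality */
	return 0;
}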