Merge tag 'for-5.9/block-20200802' of git://git.kernel.dk/linux-block
Pull core block updates from Jens Axboe:
 "Good amount of cleanups and tech debt removals in here, and as a
  result, the diffstat shows a nice net reduction in code.

   - Softirq completion cleanups (Christoph)
   - Stop using ->queuedata (Christoph)
   - Cleanup bd claiming (Christoph)
   - Use check_events, moving away from the legacy media change (Christoph)
   - Use inode i_blkbits consistently (Christoph)
   - Remove old unused writeback congestion bits (Christoph)
   - Cleanup/unify submission path (Christoph)
   - Use bio_uninit consistently, instead of bio_disassociate_blkg (Christoph)
   - sbitmap cleared bits handling (John)
   - Request merging blktrace event addition (Jan)
   - sysfs add/remove race fixes (Luis)
   - blk-mq tag fixes/optimizations (Ming)
   - Duplicate words in comments (Randy)
   - Flush deferral cleanup (Yufen)
   - IO context locking/retry fixes (John)
   - struct_size() usage (Gustavo)
   - blk-iocost fixes (Chengming)
   - blk-cgroup IO stats fixes (Boris)
   - Various little fixes"

* tag 'for-5.9/block-20200802' of git://git.kernel.dk/linux-block: (135 commits)
  block: blk-timeout: delete duplicated word
  block: blk-mq-sched: delete duplicated word
  block: blk-mq: delete duplicated word
  block: genhd: delete duplicated words
  block: elevator: delete duplicated word and fix typos
  block: bio: delete duplicated words
  block: bfq-iosched: fix duplicated word
  iocost_monitor: start from the oldest usage index
  iocost: Fix check condition of iocg abs_vdebt
  block: Remove callback typedefs for blk_mq_ops
  block: Use non _rcu version of list functions for tag_set_list
  blk-cgroup: show global disk stats in root cgroup io.stat
  blk-cgroup: make iostat functions visible to stat printing
  block: improve discard bio alignment in __blkdev_issue_discard()
  block: change REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL to be odd numbers
  block: defer flush request no matter whether we have elevator
  block: make blk_timeout_init() static
  block: remove retry loop in ioc_release_fn()
  block: remove unnecessary ioc nested locking
  block: integrate bd_start_claiming into __blkdev_get
  ...
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -64,7 +64,6 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
 
 	raw_spin_unlock_irqrestore(cpu_lock, flags);
 }
-EXPORT_SYMBOL_GPL(cgroup_rstat_updated);
 
 /**
  * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -348,7 +348,7 @@ static int __blk_trace_remove(struct request_queue *q)
 	struct blk_trace *bt;
 
 	bt = rcu_replace_pointer(q->blk_trace, NULL,
-				 lockdep_is_held(&q->blk_trace_mutex));
+				 lockdep_is_held(&q->debugfs_mutex));
 	if (!bt)
 		return -EINVAL;
 
@@ -362,9 +362,9 @@ int blk_trace_remove(struct request_queue *q)
 {
 	int ret;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	ret = __blk_trace_remove(q);
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 
 	return ret;
 }
@@ -483,12 +483,11 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	struct dentry *dir = NULL;
 	int ret;
 
+	lockdep_assert_held(&q->debugfs_mutex);
+
 	if (!buts->buf_size || !buts->buf_nr)
 		return -EINVAL;
 
-	if (!blk_debugfs_root)
-		return -ENOENT;
-
 	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
 	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
 
@@ -503,7 +502,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	 * we can be.
 	 */
 	if (rcu_dereference_protected(q->blk_trace,
-				      lockdep_is_held(&q->blk_trace_mutex))) {
+				      lockdep_is_held(&q->debugfs_mutex))) {
 		pr_warn("Concurrent blktraces are not allowed on %s\n",
 			buts->name);
 		return -EBUSY;
@@ -522,12 +521,29 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!bt->msg_data)
 		goto err;
 
-	ret = -ENOENT;
-
-	dir = debugfs_lookup(buts->name, blk_debugfs_root);
-	if (!dir)
+	/*
+	 * When tracing the whole disk reuse the existing debugfs directory
+	 * created by the block layer on init. For partitions block devices,
+	 * and scsi-generic block devices we create a temporary new debugfs
+	 * directory that will be removed once the trace ends.
+	 */
+	if (bdev && bdev == bdev->bd_contains)
+		dir = q->debugfs_dir;
+	else
 		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
 
+	/*
+	 * As blktrace relies on debugfs for its interface the debugfs directory
+	 * is required, contrary to the usual mantra of not checking for debugfs
+	 * files or directories.
+	 */
+	if (IS_ERR_OR_NULL(dir)) {
+		pr_warn("debugfs_dir not present for %s so skipping\n",
+			buts->name);
+		ret = -ENOENT;
+		goto err;
+	}
+
 	bt->dev = dev;
 	atomic_set(&bt->dropped, 0);
 	INIT_LIST_HEAD(&bt->running_list);
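The whole-disk test added above relies on the v5.8-era struct block_device layout, where a partition's bd_contains points at its whole-disk device while the whole disk points at itself. A minimal sketch of that check, with a hypothetical helper name that is not part of the patch:

/* Hypothetical helper, not in the patch: mirrors the check used above.
 * On kernels of this era a partition's bd_contains references the whole
 * disk, while the whole-disk block_device references itself. */
static bool blktrace_targets_whole_disk(struct block_device *bdev)
{
	return bdev && bdev == bdev->bd_contains;
}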
@@ -563,8 +579,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 
 	ret = 0;
 err:
-	if (dir && !bt->dir)
-		dput(dir);
 	if (ret)
 		blk_trace_free(bt);
 	return ret;
@@ -597,9 +611,9 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 {
 	int ret;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	ret = __blk_trace_setup(q, name, dev, bdev, arg);
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 
 	return ret;
 }
@@ -645,7 +659,7 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 	struct blk_trace *bt;
 
 	bt = rcu_dereference_protected(q->blk_trace,
-				       lockdep_is_held(&q->blk_trace_mutex));
+				       lockdep_is_held(&q->debugfs_mutex));
 	if (bt == NULL)
 		return -EINVAL;
 
@@ -685,9 +699,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
 {
 	int ret;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	ret = __blk_trace_startstop(q, start);
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 
 	return ret;
 }
@@ -716,7 +730,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 	if (!q)
 		return -ENXIO;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 
 	switch (cmd) {
 	case BLKTRACESETUP:
@@ -743,7 +757,7 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 		break;
 	}
 
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 	return ret;
 }
 
@@ -754,14 +768,14 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
 **/
 void blk_trace_shutdown(struct request_queue *q)
 {
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 	if (rcu_dereference_protected(q->blk_trace,
-				      lockdep_is_held(&q->blk_trace_mutex))) {
+				      lockdep_is_held(&q->debugfs_mutex))) {
 		__blk_trace_startstop(q, 0);
 		__blk_trace_remove(q);
 	}
 
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 }
 
 #ifdef CONFIG_BLK_CGROUP
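Taken together, the hunks above move the whole blktrace setup/start/stop/teardown lifecycle from blk_trace_mutex to the request_queue's debugfs_mutex. As a reference point, here is a minimal, hedged userspace sketch of that lifecycle driven through the existing BLKTRACE* ioctls; struct blk_user_trace_setup and the ioctl numbers come from the UAPI headers, while the device path, buffer sizes and sleep duration are arbitrary example values and error handling is trimmed:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>

int main(void)
{
	struct blk_user_trace_setup buts;
	/* Example device; needs CAP_SYS_ADMIN and CONFIG_BLK_DEV_IO_TRACE. */
	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* relay sub-buffer size */
	buts.buf_nr = 4;		/* number of sub-buffers */
	buts.act_mask = 0xffff;		/* all actions, including merges */

	/* BLKTRACESETUP -> blk_trace_ioctl() -> __blk_trace_setup() */
	if (ioctl(fd, BLKTRACESETUP, &buts) < 0) {
		close(fd);
		return 1;
	}

	/* BLKTRACESTART / BLKTRACESTOP -> __blk_trace_startstop() */
	ioctl(fd, BLKTRACESTART);
	sleep(5);	/* events stream out via debugfs under block/<buts.name>/ */
	ioctl(fd, BLKTRACESTOP);

	/* BLKTRACETEARDOWN -> __blk_trace_remove() */
	ioctl(fd, BLKTRACETEARDOWN);
	close(fd);
	return 0;
}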
@@ -846,6 +860,13 @@ static void blk_add_trace_rq_issue(void *ignore,
 			 blk_trace_request_get_cgid(q, rq));
 }
 
+static void blk_add_trace_rq_merge(void *ignore,
+				   struct request_queue *q, struct request *rq)
+{
+	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_BACKMERGE,
+			 blk_trace_request_get_cgid(q, rq));
+}
+
 static void blk_add_trace_rq_requeue(void *ignore,
 				     struct request_queue *q,
 				     struct request *rq)
@@ -1130,6 +1151,8 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
 	WARN_ON(ret);
+	ret = register_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
+	WARN_ON(ret);
 	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
@@ -1176,6 +1199,7 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
 	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
 	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
+	unregister_trace_block_rq_merge(blk_add_trace_rq_merge, NULL);
 	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
 	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
 
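The three hunks above wire up the tracing side of the new request-merge event ("Request merging blktrace event addition (Jan)" in the pull summary). The producing tracepoint, block_rq_merge, is declared in include/trace/events/block.h and fired from the block layer's merge path; a rough, hedged sketch of the emitting side follows, where the function is illustrative and not the actual call site:

#include <linux/blkdev.h>
#include <trace/events/block.h>

/* Illustrative only: in the real kernel the call sits in the block layer's
 * merge path, after one request has been back-merged into a neighbour. */
static void example_emit_rq_merge(struct request_queue *q, struct request *rq)
{
	trace_block_rq_merge(q, rq);	/* dispatched to blk_add_trace_rq_merge() above */
}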
@@ -1642,7 +1666,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	struct blk_trace *bt;
 
 	bt = rcu_replace_pointer(q->blk_trace, NULL,
-				 lockdep_is_held(&q->blk_trace_mutex));
+				 lockdep_is_held(&q->debugfs_mutex));
 	if (bt == NULL)
 		return -EINVAL;
 
@@ -1817,10 +1841,10 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 	if (q == NULL)
 		goto out_bdput;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 
 	bt = rcu_dereference_protected(q->blk_trace,
-				       lockdep_is_held(&q->blk_trace_mutex));
+				       lockdep_is_held(&q->debugfs_mutex));
 	if (attr == &dev_attr_enable) {
 		ret = sprintf(buf, "%u\n", !!bt);
 		goto out_unlock_bdev;
@@ -1838,7 +1862,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
 		ret = sprintf(buf, "%llu\n", bt->end_lba);
 
 out_unlock_bdev:
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 out_bdput:
 	bdput(bdev);
 out:
@@ -1881,10 +1905,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 	if (q == NULL)
 		goto out_bdput;
 
-	mutex_lock(&q->blk_trace_mutex);
+	mutex_lock(&q->debugfs_mutex);
 
 	bt = rcu_dereference_protected(q->blk_trace,
-				       lockdep_is_held(&q->blk_trace_mutex));
+				       lockdep_is_held(&q->debugfs_mutex));
 	if (attr == &dev_attr_enable) {
 		if (!!value == !!bt) {
 			ret = 0;
@@ -1901,7 +1925,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 	if (bt == NULL) {
 		ret = blk_trace_setup_queue(q, bdev);
 		bt = rcu_dereference_protected(q->blk_trace,
-				lockdep_is_held(&q->blk_trace_mutex));
+				lockdep_is_held(&q->debugfs_mutex));
 	}
 
 	if (ret == 0) {
@@ -1916,7 +1940,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 	}
 
 out_unlock_bdev:
-	mutex_unlock(&q->blk_trace_mutex);
+	mutex_unlock(&q->debugfs_mutex);
 out_bdput:
 	bdput(bdev);
 out: