Merge tag 'for-5.4/block-2019-09-16' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - Two NVMe pull requests:
     - ana log parse fix from Anton
     - nvme quirks support for Apple devices from Ben
     - fix missing bio completion tracing for multipath stack devices
       from Hannes and Mikhail
     - IP TOS settings for nvme rdma and tcp transports from Israel
     - rq_dma_dir cleanups from Israel
     - tracing for Get LBA Status command from Minwoo
     - Some nvme-tcp cleanups from Minwoo, Potnuri and Myself
     - Some consolidation between the fabrics transports for handling
       the CAP register
     - reset race with ns scanning fix for fabrics (move fabrics
       commands to a dedicated request queue with a different lifetime
       from the admin request queue)
     - controller reset and namespace scan races fixes
     - nvme discovery log change uevent support
     - naming improvements from Keith
     - multiple discovery controllers reject fix from James
     - some regular cleanups from various people

 - Series fixing (and re-fixing) null_blk debug printing and nr_devices
   checks (André)

 - A few pull requests from Song, with fixes from Andy, Guoqing,
   Guilherme, Neil, Nigel, and Yufen

 - REQ_OP_ZONE_RESET_ALL support (Chaitanya)

 - Bio merge handling unification (Christoph)

 - Pick default elevator correctly for devices with special needs
   (Damien)

 - Block stats fixes (Hou)

 - Timeout and support devices nbd fixes (Mike)

 - Series fixing races around elevator switching and device add/remove
   (Ming)

 - sed-opal cleanups (Revanth)

 - Per device weight support for BFQ (Fam)

 - Support for blk-iocost, a new model that can properly account cost
   of IO workloads (Tejun)

 - blk-cgroup writeback fixes (Tejun)

 - paride queue init fixes (zhengbin)

 - blk_set_runtime_active() cleanup (Stanley)

 - Block segment mapping optimizations (Bart)

 - lightnvm fixes (Hans/Minwoo/YueHaibing)

 - Various little fixes and cleanups

* tag 'for-5.4/block-2019-09-16' of git://git.kernel.dk/linux-block: (186 commits)
  null_blk: format pr_* logs with pr_fmt
  null_blk: match the type of parameter nr_devices
  null_blk: do not fail the module load with zero devices
  block: also check RQF_STATS in blk_mq_need_time_stamp()
  block: make rq sector size accessible for block stats
  bfq: Fix bfq linkage error
  raid5: use bio_end_sector in r5_next_bio
  raid5: remove STRIPE_OPS_REQ_PENDING
  md: add feature flag MD_FEATURE_RAID0_LAYOUT
  md/raid0: avoid RAID0 data corruption due to layout confusion.
  raid5: don't set STRIPE_HANDLE to stripe which is in batch list
  raid5: don't increment read_errors on EILSEQ return
  nvmet: fix a wrong error status returned in error log page
  nvme: send discovery log page change events to userspace
  nvme: add uevent variables for controller devices
  nvme: enable aen regardless of the presence of I/O queues
  nvme-fabrics: allow discovery subsystems accept a kato
  nvmet: Use PTR_ERR_OR_ZERO() in nvmet_init_discovery()
  nvme: Remove redundant assignment of cq vector
  nvme: Assign subsys instance from first ctrl
  ...
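The REQ_OP_ZONE_RESET_ALL support in this pull is reachable from userspace through the existing BLKRESETZONE ioctl: when the reset range covers the whole device and the queue advertises QUEUE_FLAG_ZONE_RESETALL, the kernel can issue a single reset-all command. A sketch, assuming /dev/nullb0 is a null_blk instance loaded with zoned=1 (device path and the reset-all conversion are assumptions, not guaranteed on every device):

	/* Sketch: reset every zone of a zoned block device in one call. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/blkzoned.h>
	#include <linux/fs.h>

	int main(void)
	{
		unsigned long long dev_bytes;
		struct blk_zone_range range;
		int fd = open("/dev/nullb0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, BLKGETSIZE64, &dev_bytes) < 0)
			return 1;

		range.sector = 0;
		range.nr_sectors = dev_bytes >> 9;	/* bytes -> 512B sectors */
		if (ioctl(fd, BLKRESETZONE, &range) < 0)
			perror("BLKRESETZONE");
		return 0;
	}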
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive,
 	v.native_format = UDP->native_format;
 	mutex_unlock(&floppy_mutex);
 
-	if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+	if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
 		return -EFAULT;
 	return 0;
 }
@@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll,
 	v.bufblocks = UDRS->bufblocks;
 	mutex_unlock(&floppy_mutex);
 
-	if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+	if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
 		return -EFAULT;
 	return 0;
 Eintr:
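Both hunks fix a classic usercopy direction bug: a "get"-style ioctl must copy kernel state out with copy_to_user(); copy_from_user() goes the other way and would leave the caller's buffer untouched. A standalone model of the corrected direction (kget_params and struct params are invented for illustration; memcpy stands in for copy_to_user()):

	#include <assert.h>
	#include <string.h>

	struct params { int native_format; };

	static struct params kernel_state = { .native_format = 3 };

	/* Plays the role of compat_getdrvprm(): fill v under the lock,
	 * then copy kernel -> user. The bug was the reverse direction,
	 * which copied (uninitialized) user memory over v instead. */
	static int kget_params(struct params *user_buf)
	{
		struct params v = kernel_state;

		memcpy(user_buf, &v, sizeof(v));	/* copy_to_user() */
		return 0;
	}

	int main(void)
	{
		struct params out = { 0 };

		kget_params(&out);
		assert(out.native_format == 3);
		return 0;
	}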
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1755,6 +1755,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 	case LOOP_SET_FD:
 	case LOOP_CHANGE_FD:
 	case LOOP_SET_BLOCK_SIZE:
+	case LOOP_SET_DIRECT_IO:
 		err = lo_ioctl(bdev, mode, cmd, arg);
 		break;
 	default:
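With LOOP_SET_DIRECT_IO forwarded through the compat table, a 32-bit program on a 64-bit kernel can toggle direct I/O on a loop device just like a native one. A minimal sketch, assuming /dev/loop0 is already bound to a backing file whose block size permits O_DIRECT:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/loop.h>

	int main(void)
	{
		int fd = open("/dev/loop0", O_RDWR);

		if (fd < 0)
			return 1;
		/* 1 = use O_DIRECT against the backing file, 0 = buffered */
		if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
			perror("LOOP_SET_DIRECT_IO");
		return 0;
	}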
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -108,6 +108,7 @@ struct nbd_device {
 	struct nbd_config *config;
 	struct mutex config_lock;
 	struct gendisk *disk;
+	struct workqueue_struct *recv_workq;
 
 	struct list_head list;
 	struct task_struct *task_recv;
@@ -121,6 +122,7 @@ struct nbd_cmd {
 	struct mutex lock;
 	int index;
 	int cookie;
+	int retries;
 	blk_status_t status;
 	unsigned long flags;
 	u32 cmd_cookie;
@@ -138,7 +140,6 @@ static struct dentry *nbd_dbg_dir;
 
 static unsigned int nbds_max = 16;
 static int max_part = 16;
-static struct workqueue_struct *recv_workqueue;
 static int part_shift;
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -344,6 +345,22 @@ static void sock_shutdown(struct nbd_device *nbd)
 	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
 }
 
+static u32 req_to_nbd_cmd_type(struct request *req)
+{
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
+		return NBD_CMD_TRIM;
+	case REQ_OP_FLUSH:
+		return NBD_CMD_FLUSH;
+	case REQ_OP_WRITE:
+		return NBD_CMD_WRITE;
+	case REQ_OP_READ:
+		return NBD_CMD_READ;
+	default:
+		return U32_MAX;
+	}
+}
+
 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 						 bool reserved)
 {
@@ -357,8 +374,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 	}
 	config = nbd->config;
 
-	if (!mutex_trylock(&cmd->lock))
+	if (!mutex_trylock(&cmd->lock)) {
+		nbd_config_put(nbd);
 		return BLK_EH_RESET_TIMER;
+	}
 
 	if (config->num_connections > 1) {
 		dev_err_ratelimited(nbd_to_dev(nbd),
@@ -389,10 +408,25 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
 			nbd_config_put(nbd);
 			return BLK_EH_DONE;
 		}
-	} else {
-		dev_err_ratelimited(nbd_to_dev(nbd),
-				    "Connection timed out\n");
 	}
+
+	if (!nbd->tag_set.timeout) {
+		/*
+		 * Userspace sets timeout=0 to disable socket disconnection,
+		 * so just warn and reset the timer.
+		 */
+		cmd->retries++;
+		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
+			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
+			(unsigned long long)blk_rq_pos(req) << 9,
+			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
+
+		mutex_unlock(&cmd->lock);
+		nbd_config_put(nbd);
+		return BLK_EH_RESET_TIMER;
+	}
+
+	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
 	set_bit(NBD_TIMEDOUT, &config->runtime_flags);
 	cmd->status = BLK_STS_IOERR;
 	mutex_unlock(&cmd->lock);
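The dev_info() above reports how long a request has been outstanding without storing a start time: every expiry that returns BLK_EH_RESET_TIMER bumps cmd->retries, so elapsed time is reconstructed as timeout-per-arm times the number of arms. A tiny model of that arithmetic (the HZ value is an assumption; it varies by kernel config):

	#include <assert.h>

	#define HZ 250

	int main(void)
	{
		unsigned int req_timeout = 30 * HZ;	/* jiffies, as in blk-mq */
		unsigned int retries = 3;		/* cmd->retries so far */
		unsigned int runtime = (req_timeout / HZ) * retries;

		assert(runtime == 90);	/* "Runtime 90 seconds" in the log */
		return 0;
	}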
@@ -480,22 +514,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
 	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
 
-	switch (req_op(req)) {
-	case REQ_OP_DISCARD:
-		type = NBD_CMD_TRIM;
-		break;
-	case REQ_OP_FLUSH:
-		type = NBD_CMD_FLUSH;
-		break;
-	case REQ_OP_WRITE:
-		type = NBD_CMD_WRITE;
-		break;
-	case REQ_OP_READ:
-		type = NBD_CMD_READ;
-		break;
-	default:
-		return -EIO;
-	}
+	type = req_to_nbd_cmd_type(req);
+	if (type == U32_MAX)
+		return -EIO;
 
 	if (rq_data_dir(req) == WRITE &&
 	    (config->flags & NBD_FLAG_READ_ONLY)) {
@@ -526,6 +547,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	}
 	cmd->index = index;
 	cmd->cookie = nsock->cookie;
+	cmd->retries = 0;
 	request.type = htonl(type | nbd_cmd_flags);
 	if (type != NBD_CMD_FLUSH) {
 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
@@ -1036,7 +1058,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 		/* We take the tx_mutex in an error path in the recv_work, so we
 		 * need to queue_work outside of the tx_mutex.
 		 */
-		queue_work(recv_workqueue, &args->work);
+		queue_work(nbd->recv_workq, &args->work);
 
 		atomic_inc(&config->live_connections);
 		wake_up(&config->conn_wait);
@@ -1137,6 +1159,10 @@ static void nbd_config_put(struct nbd_device *nbd)
 		kfree(nbd->config);
 		nbd->config = NULL;
 
+		if (nbd->recv_workq)
+			destroy_workqueue(nbd->recv_workq);
+		nbd->recv_workq = NULL;
+
 		nbd->tag_set.timeout = 0;
 		nbd->disk->queue->limits.discard_granularity = 0;
 		nbd->disk->queue->limits.discard_alignment = 0;
@@ -1165,6 +1191,14 @@ static int nbd_start_device(struct nbd_device *nbd)
 		return -EINVAL;
 	}
 
+	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
+					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
+					  WQ_UNBOUND, 0, nbd->index);
+	if (!nbd->recv_workq) {
+		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
+		return -ENOMEM;
+	}
+
 	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
 	nbd->task_recv = current;
 
@@ -1195,7 +1229,7 @@ static int nbd_start_device(struct nbd_device *nbd)
 		INIT_WORK(&args->work, recv_work);
 		args->nbd = nbd;
 		args->index = i;
-		queue_work(recv_workqueue, &args->work);
+		queue_work(nbd->recv_workq, &args->work);
 	}
 	nbd_size_update(nbd);
 	return error;
@@ -1215,8 +1249,10 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 					 atomic_read(&config->recv_threads) == 0);
-	if (ret)
+	if (ret) {
 		sock_shutdown(nbd);
+		flush_workqueue(nbd->recv_workq);
+	}
 	mutex_lock(&nbd->config_lock);
 	nbd_bdev_reset(bdev);
 	/* user requested, ignore socket errors */
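The flush-before-teardown ordering in these hunks enforces a lifetime rule: per-device state (including the workqueue itself) must not be freed while a receive worker can still touch it. A userspace analog with pthreads (invented names; pthread_join plays the role of flush_workqueue()):

	#include <assert.h>
	#include <pthread.h>
	#include <stdlib.h>

	struct config { int live; };

	static void *recv_work(void *arg)
	{
		struct config *cfg = arg;

		cfg->live = 1;		/* worker touches shared state */
		return NULL;
	}

	int main(void)
	{
		struct config *cfg = calloc(1, sizeof(*cfg));
		pthread_t worker;

		pthread_create(&worker, NULL, recv_work, cfg);

		/* flush_workqueue() equivalent: wait until no worker can
		 * still dereference cfg, and only then release it. */
		pthread_join(worker, NULL);
		assert(cfg->live == 1);
		free(cfg);
		return 0;
	}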
@@ -1246,6 +1282,13 @@ static bool nbd_is_valid_blksize(unsigned long blksize)
 	return true;
 }
 
+static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
+{
+	nbd->tag_set.timeout = timeout * HZ;
+	if (timeout)
+		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
+}
+
 /* Must be called with config_lock held */
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		       unsigned int cmd, unsigned long arg)
@@ -1276,10 +1319,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		nbd_size_set(nbd, config->blksize, arg);
 		return 0;
 	case NBD_SET_TIMEOUT:
-		if (arg) {
-			nbd->tag_set.timeout = arg * HZ;
-			blk_queue_rq_timeout(nbd->disk->queue, arg * HZ);
-		}
+		nbd_set_cmd_timeout(nbd, arg);
 		return 0;
 
 	case NBD_SET_FLAGS:
@@ -1799,11 +1839,9 @@ again:
 	if (ret)
 		goto out;
 
-	if (info->attrs[NBD_ATTR_TIMEOUT]) {
-		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
-		nbd->tag_set.timeout = timeout * HZ;
-		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
-	}
+	if (info->attrs[NBD_ATTR_TIMEOUT])
+		nbd_set_cmd_timeout(nbd,
+				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
 	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
 		config->dead_conn_timeout =
 			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
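With the ioctl and netlink paths both funneling into nbd_set_cmd_timeout(), timeout=0 consistently means "never drop the socket, keep re-arming the timer". From userspace the legacy knob looks like this (a sketch; assumes /dev/nbd0 is already connected by a client such as nbd-client):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/nbd.h>

	int main(void)
	{
		int fd = open("/dev/nbd0", O_RDWR);

		if (fd < 0)
			return 1;
		/* 75 second command timeout; 0 would disable disconnection
		 * and only log "Possible stuck request" on expiry. */
		if (ioctl(fd, NBD_SET_TIMEOUT, 75UL) < 0)
			perror("NBD_SET_TIMEOUT");
		return 0;
	}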
@@ -1875,6 +1913,12 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
 	nbd_disconnect(nbd);
 	nbd_clear_sock(nbd);
 	mutex_unlock(&nbd->config_lock);
+	/*
+	 * Make sure recv thread has finished, so it does not drop the last
+	 * config ref and try to destroy the workqueue from inside the work
+	 * queue.
+	 */
+	flush_workqueue(nbd->recv_workq);
 	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
 			       &nbd->config->runtime_flags))
 		nbd_config_put(nbd);
@@ -1971,11 +2015,9 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
 	if (ret)
 		goto out;
 
-	if (info->attrs[NBD_ATTR_TIMEOUT]) {
-		u64 timeout = nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]);
-		nbd->tag_set.timeout = timeout * HZ;
-		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
-	}
+	if (info->attrs[NBD_ATTR_TIMEOUT])
+		nbd_set_cmd_timeout(nbd,
+				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
 	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
 		config->dead_conn_timeout =
 			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
@@ -2261,20 +2303,12 @@ static int __init nbd_init(void)
 
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
-	recv_workqueue = alloc_workqueue("knbd-recv",
-					 WQ_MEM_RECLAIM | WQ_HIGHPRI |
-					 WQ_UNBOUND, 0);
-	if (!recv_workqueue)
-		return -ENOMEM;
 
-	if (register_blkdev(NBD_MAJOR, "nbd")) {
-		destroy_workqueue(recv_workqueue);
+	if (register_blkdev(NBD_MAJOR, "nbd"))
 		return -EIO;
-	}
 
 	if (genl_register_family(&nbd_genl_family)) {
 		unregister_blkdev(NBD_MAJOR, "nbd");
-		destroy_workqueue(recv_workqueue);
 		return -EINVAL;
 	}
 	nbd_dbg_init();
@@ -2316,7 +2350,6 @@ static void __exit nbd_cleanup(void)
 
 	idr_destroy(&nbd_index_idr);
 	genl_unregister_family(&nbd_genl_family);
-	destroy_workqueue(recv_workqueue);
 	unregister_blkdev(NBD_MAJOR, "nbd");
 }
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -2,6 +2,9 @@
 #ifndef __BLK_NULL_BLK_H
 #define __BLK_NULL_BLK_H
 
+#undef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
@@ -90,13 +93,13 @@ int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
 int null_zone_report(struct gendisk *disk, sector_t sector,
 		     struct blk_zone *zones, unsigned int *nr_zones);
-void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
-			unsigned int nr_sectors);
-void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
+blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
+			       enum req_opf op, sector_t sector,
+			       sector_t nr_sectors);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
-	pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
+	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
 	return -EINVAL;
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
@@ -106,10 +109,11 @@ static inline int null_zone_report(struct gendisk *disk, sector_t sector,
 {
 	return -EOPNOTSUPP;
 }
-static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
-				   unsigned int nr_sectors)
+static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
+					     enum req_opf op, sector_t sector,
+					     sector_t nr_sectors)
 {
+	return BLK_STS_NOTSUPP;
 }
-static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
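The pr_fmt define above is why every message string in the following hunks loses its hard-coded "null_blk: " prefix: the pr_* macros paste pr_fmt(fmt) onto the format string at expansion time. A standalone demonstration of the mechanism (fprintf standing in for printk):

	#include <stdio.h>

	#define KBUILD_MODNAME "null_blk"
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		/* Prints "null_blk: zone_size must be power-of-two" --
		 * the prefix comes from pr_fmt, not the call site. */
		pr_err("zone_size must be power-of-two\n");
		return 0;
	}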
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -141,8 +141,8 @@ static int g_bs = 512;
 module_param_named(bs, g_bs, int, 0444);
 MODULE_PARM_DESC(bs, "Block size (in bytes)");
 
-static int nr_devices = 1;
-module_param(nr_devices, int, 0444);
+static unsigned int nr_devices = 1;
+module_param(nr_devices, uint, 0444);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
 static bool g_blocking;
@@ -1133,93 +1133,61 @@ static void null_restart_queue_async(struct nullb *nullb)
 		blk_mq_start_stopped_hw_queues(q, true);
 }
 
-static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	struct nullb *nullb = dev->nullb;
-	int err = 0;
+	blk_status_t sts = BLK_STS_OK;
+	struct request *rq = cmd->rq;
 
-	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
-		struct request *rq = cmd->rq;
+	if (!hrtimer_active(&nullb->bw_timer))
+		hrtimer_restart(&nullb->bw_timer);
 
-		if (!hrtimer_active(&nullb->bw_timer))
-			hrtimer_restart(&nullb->bw_timer);
-
-		if (atomic_long_sub_return(blk_rq_bytes(rq),
-				&nullb->cur_bytes) < 0) {
-			null_stop_queue(nullb);
-			/* race with timer */
-			if (atomic_long_read(&nullb->cur_bytes) > 0)
-				null_restart_queue_async(nullb);
-			/* requeue request */
-			return BLK_STS_DEV_RESOURCE;
-		}
+	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
+		null_stop_queue(nullb);
+		/* race with timer */
+		if (atomic_long_read(&nullb->cur_bytes) > 0)
+			null_restart_queue_async(nullb);
+		/* requeue request */
+		sts = BLK_STS_DEV_RESOURCE;
 	}
+	return sts;
+}
 
-	if (nullb->dev->badblocks.shift != -1) {
-		int bad_sectors;
-		sector_t sector, size, first_bad;
-		bool is_flush = true;
-
-		if (dev->queue_mode == NULL_Q_BIO &&
-				bio_op(cmd->bio) != REQ_OP_FLUSH) {
-			is_flush = false;
-			sector = cmd->bio->bi_iter.bi_sector;
-			size = bio_sectors(cmd->bio);
-		}
-		if (dev->queue_mode != NULL_Q_BIO &&
-				req_op(cmd->rq) != REQ_OP_FLUSH) {
-			is_flush = false;
-			sector = blk_rq_pos(cmd->rq);
-			size = blk_rq_sectors(cmd->rq);
-		}
-		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
-				size, &first_bad, &bad_sectors)) {
-			cmd->error = BLK_STS_IOERR;
-			goto out;
-		}
-	}
+static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
+						 sector_t sector,
+						 sector_t nr_sectors)
+{
+	struct badblocks *bb = &cmd->nq->dev->badblocks;
+	sector_t first_bad;
+	int bad_sectors;
 
-	if (dev->memory_backed) {
-		if (dev->queue_mode == NULL_Q_BIO) {
-			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
-				err = null_handle_flush(nullb);
-			else
-				err = null_handle_bio(cmd);
-		} else {
-			if (req_op(cmd->rq) == REQ_OP_FLUSH)
-				err = null_handle_flush(nullb);
-			else
-				err = null_handle_rq(cmd);
-		}
-	}
-	cmd->error = errno_to_blk_status(err);
+	if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
+		return BLK_STS_IOERR;
 
-	if (!cmd->error && dev->zoned) {
-		sector_t sector;
-		unsigned int nr_sectors;
-		enum req_opf op;
+	return BLK_STS_OK;
+}
 
-		if (dev->queue_mode == NULL_Q_BIO) {
-			op = bio_op(cmd->bio);
-			sector = cmd->bio->bi_iter.bi_sector;
-			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
-		} else {
-			op = req_op(cmd->rq);
-			sector = blk_rq_pos(cmd->rq);
-			nr_sectors = blk_rq_sectors(cmd->rq);
-		}
+static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
+						     enum req_opf op)
+{
+	struct nullb_device *dev = cmd->nq->dev;
+	int err;
 
-		if (op == REQ_OP_WRITE)
-			null_zone_write(cmd, sector, nr_sectors);
-		else if (op == REQ_OP_ZONE_RESET)
-			null_zone_reset(cmd, sector);
-	}
-out:
+	if (dev->queue_mode == NULL_Q_BIO)
+		err = null_handle_bio(cmd);
+	else
+		err = null_handle_rq(cmd);
+
+	return errno_to_blk_status(err);
+}
+
+static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
+{
 	/* Complete IO by inline, softirq or timer */
-	switch (dev->irqmode) {
+	switch (cmd->nq->dev->irqmode) {
 	case NULL_IRQ_SOFTIRQ:
-		switch (dev->queue_mode) {
+		switch (cmd->nq->dev->queue_mode) {
 		case NULL_Q_MQ:
 			blk_mq_complete_request(cmd->rq);
 			break;
@@ -1238,6 +1206,40 @@ out:
 		null_cmd_end_timer(cmd);
 		break;
 	}
+}
+
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
+				    sector_t nr_sectors, enum req_opf op)
+{
+	struct nullb_device *dev = cmd->nq->dev;
+	struct nullb *nullb = dev->nullb;
+	blk_status_t sts;
+
+	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
+		sts = null_handle_throttled(cmd);
+		if (sts != BLK_STS_OK)
+			return sts;
+	}
+
+	if (op == REQ_OP_FLUSH) {
+		cmd->error = errno_to_blk_status(null_handle_flush(nullb));
+		goto out;
+	}
+
+	if (nullb->dev->badblocks.shift != -1) {
+		cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
+		if (cmd->error != BLK_STS_OK)
+			goto out;
+	}
+
+	if (dev->memory_backed)
+		cmd->error = null_handle_memory_backed(cmd, op);
+
+	if (!cmd->error && dev->zoned)
+		cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
+
+out:
+	nullb_complete_cmd(cmd);
 	return BLK_STS_OK;
 }
 
@@ -1280,6 +1282,8 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
 
 static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 {
+	sector_t sector = bio->bi_iter.bi_sector;
+	sector_t nr_sectors = bio_sectors(bio);
 	struct nullb *nullb = q->queuedata;
 	struct nullb_queue *nq = nullb_to_queue(nullb);
 	struct nullb_cmd *cmd;
@@ -1287,7 +1291,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 	cmd = alloc_cmd(nq, 1);
 	cmd->bio = bio;
 
-	null_handle_cmd(cmd);
+	null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
 	return BLK_QC_T_NONE;
 }
 
@@ -1311,7 +1315,7 @@ static bool should_requeue_request(struct request *rq)
 
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
-	pr_info("null: rq %p timed out\n", rq);
+	pr_info("rq %p timed out\n", rq);
 	blk_mq_complete_request(rq);
 	return BLK_EH_DONE;
 }
@@ -1321,6 +1325,8 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 	struct nullb_queue *nq = hctx->driver_data;
+	sector_t nr_sectors = blk_rq_sectors(bd->rq);
+	sector_t sector = blk_rq_pos(bd->rq);
 
 	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
 
@@ -1349,7 +1355,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (should_timeout_request(bd->rq))
 		return BLK_STS_OK;
 
-	return null_handle_cmd(cmd);
+	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
 }
 
 static const struct blk_mq_ops null_mq_ops = {
@@ -1688,6 +1694,9 @@ static int null_add_dev(struct nullb_device *dev)
 
 		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
 		nullb->q->limits.zoned = BLK_ZONED_HM;
+		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
+		blk_queue_required_elevator_features(nullb->q,
+						ELEVATOR_F_ZBD_SEQ_WRITE);
 	}
 
 	nullb->q->queuedata = nullb;
@@ -1739,28 +1748,28 @@ static int __init null_init(void)
 	struct nullb_device *dev;
 
 	if (g_bs > PAGE_SIZE) {
-		pr_warn("null_blk: invalid block size\n");
-		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
+		pr_warn("invalid block size\n");
+		pr_warn("defaults block size to %lu\n", PAGE_SIZE);
 		g_bs = PAGE_SIZE;
 	}
 
 	if (!is_power_of_2(g_zone_size)) {
-		pr_err("null_blk: zone_size must be power-of-two\n");
+		pr_err("zone_size must be power-of-two\n");
 		return -EINVAL;
 	}
 
 	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
-		pr_err("null_blk: invalid home_node value\n");
+		pr_err("invalid home_node value\n");
 		g_home_node = NUMA_NO_NODE;
 	}
 
 	if (g_queue_mode == NULL_Q_RQ) {
-		pr_err("null_blk: legacy IO path no longer available\n");
+		pr_err("legacy IO path no longer available\n");
 		return -EINVAL;
 	}
 	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
 		if (g_submit_queues != nr_online_nodes) {
-			pr_warn("null_blk: submit_queues param is set to %u.\n",
+			pr_warn("submit_queues param is set to %u.\n",
 				nr_online_nodes);
 			g_submit_queues = nr_online_nodes;
 		}
@@ -1803,7 +1812,7 @@ static int __init null_init(void)
 		}
 	}
 
-	pr_info("null: module loaded\n");
+	pr_info("module loaded\n");
 	return 0;
 
 err_dev:
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -17,7 +17,7 @@ int null_zone_init(struct nullb_device *dev)
 	unsigned int i;
 
 	if (!is_power_of_2(dev->zone_size)) {
-		pr_err("null_blk: zone_size must be power-of-two\n");
+		pr_err("zone_size must be power-of-two\n");
 		return -EINVAL;
 	}
 
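The is_power_of_2() check that gates zone_size here (and in null_init() above) is the usual bit trick: a power of two has exactly one bit set, so n & (n - 1) clears it to zero. A one-line standalone model:

	#include <assert.h>
	#include <stdbool.h>

	/* Model of the kernel's is_power_of_2(). */
	static bool is_power_of_2(unsigned long n)
	{
		return n != 0 && (n & (n - 1)) == 0;
	}

	int main(void)
	{
		assert(is_power_of_2(256));	/* valid zone_size */
		assert(!is_power_of_2(192));	/* rejected with -EINVAL */
		return 0;
	}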
@@ -31,7 +31,7 @@ int null_zone_init(struct nullb_device *dev)
 
 	if (dev->zone_nr_conv >= dev->nr_zones) {
 		dev->zone_nr_conv = dev->nr_zones - 1;
-		pr_info("null_blk: changed the number of conventional zones to %u",
+		pr_info("changed the number of conventional zones to %u",
 			dev->zone_nr_conv);
 	}
 
@@ -84,7 +84,7 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
 	return 0;
 }
 
-void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		     unsigned int nr_sectors)
 {
 	struct nullb_device *dev = cmd->nq->dev;
@@ -95,14 +95,12 @@ void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	switch (zone->cond) {
 	case BLK_ZONE_COND_FULL:
 		/* Cannot write to a full zone */
-		cmd->error = BLK_STS_IOERR;
-		break;
+		return BLK_STS_IOERR;
 	case BLK_ZONE_COND_EMPTY:
 	case BLK_ZONE_COND_IMP_OPEN:
 		/* Writes must be at the write pointer position */
-		if (sector != zone->wp) {
-			cmd->error = BLK_STS_IOERR;
-			break;
-		}
+		if (sector != zone->wp)
+			return BLK_STS_IOERR;
 
 		if (zone->cond == BLK_ZONE_COND_EMPTY)
 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
@@ -115,22 +113,51 @@ void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		break;
 	default:
 		/* Invalid zone condition */
-		cmd->error = BLK_STS_IOERR;
-		break;
+		return BLK_STS_IOERR;
 	}
+	return BLK_STS_OK;
 }
 
-void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
+static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	unsigned int zno = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zno];
+	size_t i;
 
-	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
-		cmd->error = BLK_STS_IOERR;
-		return;
+	switch (req_op(cmd->rq)) {
+	case REQ_OP_ZONE_RESET_ALL:
+		for (i = 0; i < dev->nr_zones; i++) {
+			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
+				continue;
+			zone[i].cond = BLK_ZONE_COND_EMPTY;
+			zone[i].wp = zone[i].start;
+		}
+		break;
+	case REQ_OP_ZONE_RESET:
+		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+			return BLK_STS_IOERR;
+
+		zone->cond = BLK_ZONE_COND_EMPTY;
+		zone->wp = zone->start;
+		break;
+	default:
+		cmd->error = BLK_STS_NOTSUPP;
+		break;
 	}
+	return BLK_STS_OK;
+}
 
-	zone->cond = BLK_ZONE_COND_EMPTY;
-	zone->wp = zone->start;
+blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
+			       sector_t sector, sector_t nr_sectors)
+{
+	switch (op) {
+	case REQ_OP_WRITE:
+		return null_zone_write(cmd, sector, nr_sectors);
+	case REQ_OP_ZONE_RESET:
+	case REQ_OP_ZONE_RESET_ALL:
+		return null_zone_reset(cmd, sector);
+	default:
+		return BLK_STS_OK;
+	}
 }
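The reset-all branch above skips conventional zones and rewinds each sequential zone's write pointer to the zone start. A compact userspace model of that loop (types and names invented; the real code walks dev->zones):

	#include <assert.h>

	enum ztype { CONVENTIONAL, SEQWRITE };
	enum zcond { EMPTY, IMP_OPEN, FULL };

	struct zone { enum ztype type; enum zcond cond; unsigned long start, wp; };

	static void zone_reset_all(struct zone *zones, unsigned int nr_zones)
	{
		unsigned int i;

		for (i = 0; i < nr_zones; i++) {
			if (zones[i].type == CONVENTIONAL)
				continue;	/* no write pointer to reset */
			zones[i].cond = EMPTY;
			zones[i].wp = zones[i].start;
		}
	}

	int main(void)
	{
		struct zone z[2] = {
			{ CONVENTIONAL, EMPTY, 0, 0 },
			{ SEQWRITE, FULL, 524288, 1048575 },
		};

		zone_reset_all(z, 2);
		assert(z[1].cond == EMPTY && z[1].wp == z[1].start);
		return 0;
	}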
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,8 +314,8 @@ static void pcd_init_units(void)
 		disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
 						   1, BLK_MQ_F_SHOULD_MERGE);
 		if (IS_ERR(disk->queue)) {
-			put_disk(disk);
 			disk->queue = NULL;
+			put_disk(disk);
 			continue;
 		}
 
@@ -723,9 +723,9 @@ static int pcd_detect(void)
 	k = 0;
 	if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
 		cd = pcd;
-		if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer,
-			    PI_PCD, verbose, cd->name)) {
-			if (!pcd_probe(cd, -1, id) && cd->disk) {
+		if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1,
+					pcd_buffer, PI_PCD, verbose, cd->name)) {
+			if (!pcd_probe(cd, -1, id)) {
 				cd->present = 1;
 				k++;
 			} else
@@ -736,11 +736,13 @@ static int pcd_detect(void)
 			int *conf = *drives[unit];
 			if (!conf[D_PRT])
 				continue;
+			if (!cd->disk)
+				continue;
 			if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
 				     conf[D_UNI], conf[D_PRO], conf[D_DLY],
 				     pcd_buffer, PI_PCD, verbose, cd->name))
 				continue;
-			if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) {
+			if (!pcd_probe(cd, conf[D_SLV], id)) {
 				cd->present = 1;
 				k++;
 			} else
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -300,8 +300,8 @@ static void __init pf_init_units(void)
 		disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
 						   1, BLK_MQ_F_SHOULD_MERGE);
 		if (IS_ERR(disk->queue)) {
-			put_disk(disk);
 			disk->queue = NULL;
+			put_disk(disk);
 			continue;
 		}
 
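Both paride fixes reorder the error path so disk->queue is cleared before put_disk(); otherwise the disk release path could see the ERR_PTR left by blk_mq_init_sq_queue() and treat it as a real queue. A standalone model of the pattern (all names invented for illustration):

	#include <assert.h>
	#include <stddef.h>

	struct queue { int refs; };
	struct disk { struct queue *queue; };

	/* disk_release() analog: only touches a valid queue pointer. */
	static void put_disk(struct disk *d)
	{
		if (d->queue)
			d->queue->refs--;
	}

	int main(void)
	{
		struct disk d;

		d.queue = (struct queue *)-12;	/* ERR_PTR(-ENOMEM) stand-in */

		/* Fixed ordering: clear the bogus pointer first, then let
		 * the destructor run safely. */
		d.queue = NULL;
		put_disk(&d);

		assert(d.queue == NULL);
		return 0;
	}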