Merge branch 'for-4.2/core' of git://git.kernel.dk/linux-block
Pull core block IO update from Jens Axboe:
 "Nothing really major in here, mostly a collection of smaller
  optimizations and cleanups, mixed with various fixes.  In more detail,
  this contains:

   - Addition of policy specific data to blkcg for block cgroups.  From
     Arianna Avanzini.

   - Various cleanups around command types from Christoph.

   - Cleanup of the suspend block I/O path from Christoph.

   - Plugging updates from Shaohua and Jeff Moyer, for blk-mq.

   - Eliminating atomic inc/dec of both remaining IO count and reference
     count in a bio.  From me.

   - Fixes for SG gap and chunk size support for data-less (discards)
     IO, so we can merge these better.  From me.

   - Small restructuring of blk-mq shared tag support, freeing drivers
     from iterating hardware queues.  From Keith Busch.

   - A few cfq-iosched tweaks, from Tahsin Erdogan and me.  Makes the
     IOPS mode the default for non-rotational storage"

* 'for-4.2/core' of git://git.kernel.dk/linux-block: (35 commits)
  cfq-iosched: fix other locations where blkcg_to_cfqgd() can return NULL
  cfq-iosched: fix sysfs oops when attempting to read unconfigured weights
  cfq-iosched: move group scheduling functions under ifdef
  cfq-iosched: fix the setting of IOPS mode on SSDs
  blktrace: Add blktrace.c to BLOCK LAYER in MAINTAINERS file
  block, cgroup: implement policy-specific per-blkcg data
  block: Make CFQ default to IOPS mode on SSDs
  block: add blk_set_queue_dying() to blkdev.h
  blk-mq: Shared tag enhancements
  block: don't honor chunk sizes for data-less IO
  block: only honor SG gap prevention for merges that contain data
  block: fix returnvar.cocci warnings
  block, dm: don't copy bios for request clones
  block: remove management of bi_remaining when restoring original bi_end_io
  block: replace trylock with mutex_lock in blkdev_reread_part()
  block: export blkdev_reread_part() and __blkdev_reread_part()
  suspend: simplify block I/O handling
  block: collapse bio bit space
  block: remove unused BIO_RW_BLOCK and BIO_EOF flags
  block: remove BIO_EOPNOTSUPP
  ...
@@ -30,7 +30,6 @@ struct scsi_ioctl_command;
 
 struct request_queue;
 struct elevator_queue;
-struct request_pm_state;
 struct blk_trace;
 struct request;
 struct sg_io_hdr;
@@ -75,18 +74,7 @@ struct request_list {
 enum rq_cmd_type_bits {
 	REQ_TYPE_FS		= 1,	/* fs request */
 	REQ_TYPE_BLOCK_PC,	/* scsi command */
-	REQ_TYPE_SENSE,		/* sense request */
-	REQ_TYPE_PM_SUSPEND,	/* suspend request */
-	REQ_TYPE_PM_RESUME,	/* resume request */
-	REQ_TYPE_PM_SHUTDOWN,	/* shutdown request */
-	REQ_TYPE_SPECIAL,	/* driver defined type */
-	/*
-	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
-	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
-	 * private REQ_LB opcodes to differentiate what type of request this is
-	 */
-	REQ_TYPE_ATA_TASKFILE,
-	REQ_TYPE_ATA_PC,
+	REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
 };
 
 #define BLK_MAX_CDB	16
@@ -108,7 +96,7 @@ struct request {
 	struct blk_mq_ctx *mq_ctx;
 
 	u64 cmd_flags;
-	enum rq_cmd_type_bits cmd_type;
+	unsigned cmd_type;
 	unsigned long atomic_flags;
 
 	int cpu;
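
With cmd_type demoted from the enum to a plain unsigned, and REQ_TYPE_DRV_PRIV now the last shared value, drivers that previously leaned on REQ_TYPE_SPECIAL or the REQ_TYPE_ATA_* values can number their own types past REQ_TYPE_DRV_PRIV. A minimal sketch of the idea (the mydrv_* names are hypothetical, not from this merge):

	#include <linux/blkdev.h>

	/* hypothetical driver-private request types, numbered from
	 * REQ_TYPE_DRV_PRIV as the new enum comment suggests */
	enum mydrv_cmd_type {
		MYDRV_TYPE_INTERNAL = REQ_TYPE_DRV_PRIV,
		MYDRV_TYPE_FLUSH_CACHE,
	};

	static bool mydrv_rq_is_private(struct request *rq)
	{
		/* comparing past the enum's end is fine now that
		 * rq->cmd_type is a plain unsigned */
		return rq->cmd_type >= REQ_TYPE_DRV_PRIV;
	}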
@@ -216,19 +204,6 @@ static inline unsigned short req_get_ioprio(struct request *req)
 	return req->ioprio;
 }
 
-/*
- * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
- * requests.  Some step values could eventually be made generic.
- */
-struct request_pm_state
-{
-	/* PM state machine step value, currently driver specific */
-	int	pm_step;
-	/* requested PM state value (S1, S2, S3, S4, ...) */
-	u32	pm_state;
-	void*	data;		/* for driver use */
-};
-
 #include <linux/elevator.h>
 
 struct blk_queue_ctx;
@@ -469,7 +444,7 @@ struct request_queue {
 	struct mutex		sysfs_lock;
 
 	int			bypass_depth;
-	int			mq_freeze_depth;
+	atomic_t		mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
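
Turning mq_freeze_depth into an atomic_t lets nested freeze/unfreeze calls account themselves without holding a lock around the counter; only the transition to and from zero does real work. Roughly what the blk-mq freeze-start path does with it after this change (a paraphrased sketch of the 4.2-era code, not verbatim):

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	static void my_freeze_queue_start(struct request_queue *q)
	{
		/* only the first freezer kills the usage ref and drains */
		if (atomic_inc_return(&q->mq_freeze_depth) == 1) {
			percpu_ref_kill(&q->mq_usage_counter);
			blk_mq_run_hw_queues(q, false);
		}
	}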
@@ -610,10 +585,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	(((rq)->cmd_flags & REQ_STARTED) && \
 	 ((rq)->cmd_type == REQ_TYPE_FS))
 
-#define blk_pm_request(rq)	\
-	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
-	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)
-
 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
 /* rq->queuelist of dequeued request must be list_empty() */
@@ -804,11 +775,7 @@ extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
-			     struct bio_set *bs, gfp_t gfp_mask,
-			     int (*bio_ctr)(struct bio *, struct bio *, void *),
-			     void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
+extern void blk_rq_prep_clone(struct request *rq, struct request *rq_src);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
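
The new two-argument blk_rq_prep_clone() reflects "block, dm: don't copy bios for request clones": the clone now points at the source request's bios instead of copying them through a bio_set, so the bio_ctr callback and blk_rq_unprep_clone() disappear. A hedged sketch of a request-based caller (function and variable names are hypothetical):

	#include <linux/blkdev.h>

	static int my_issue_clone(struct request_queue *lower_q,
				  struct request *clone, struct request *rq)
	{
		blk_rq_prep_clone(clone, rq);	/* clone shares rq's bios */
		return blk_insert_cloned_request(lower_q, clone);
	}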
@@ -845,6 +812,7 @@ extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
+extern void __blk_run_queue_uncond(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
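
__blk_run_queue_uncond() differs from __blk_run_queue() in that it invokes the queue's request_fn even when the queue has been stopped, and it expects the queue lock to be held. A hedged usage sketch, assuming a legacy (non-mq) queue:

	#include <linux/blkdev.h>

	static void my_kick_queue(struct request_queue *q)
	{
		spin_lock_irq(q->queue_lock);
		/* unlike __blk_run_queue(), this runs ->request_fn even
		 * when QUEUE_FLAG_STOPPED is set */
		__blk_run_queue_uncond(q);
		spin_unlock_irq(q->queue_lock);
	}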
@@ -933,7 +901,7 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	if (!q->limits.chunk_sectors)
+	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
 		return blk_queue_get_max_sectors(q, rq->cmd_flags);
 
 	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
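
The chunk_sectors path exists so a request never crosses a chunk boundary: blk_max_size_offset() returns chunk_sectors - (pos & (chunk_sectors - 1)). As a worked example, with chunk_sectors = 256 and a request starting at sector 250, that caps the request at 256 - (250 & 255) = 6 sectors. A discard carries no data to split at chunk boundaries, so for data-less IO that cap only defeats merging, hence the REQ_DISCARD bypass above.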
@@ -1054,6 +1022,7 @@ bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
+extern void blk_set_queue_dying(struct request_queue *);
 
 /*
  * block layer runtime pm functions
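
Declaring blk_set_queue_dying() in blkdev.h makes it callable from drivers, which can mark a queue dying early in their removal path so new submissions fail fast instead of blocking. A hedged sketch (struct my_dev and its fields are hypothetical):

	#include <linux/blkdev.h>
	#include <linux/genhd.h>

	static void my_remove(struct my_dev *dev)
	{
		blk_set_queue_dying(dev->queue);  /* new I/O now fails fast */
		del_gendisk(dev->disk);
		blk_cleanup_queue(dev->queue);
		put_disk(dev->disk);
	}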