block: prepare for multiple request_lists
Request allocation is about to be made per-blkg, meaning that there'll
be multiple request lists.

* Make queue full state per request_list.  blk_*queue_full() functions
  are renamed to blk_*rl_full() and take @rl instead of @q.

* Rename blk_init_free_list() to blk_init_rl() and make it take @rl
  instead of @q.  Also add @gfp_mask parameter.

* Add blk_exit_rl() instead of destroying rl directly from
  blk_release_queue().

* Add request_list->q and make request alloc/free functions -
  blk_free_request(), [__]freed_request(), __get_request() - take @rl
  instead of @q.

This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
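
Since the hunks below only show the header side of the change, here is a minimal,
self-contained userspace model of the new per-request_list full-state interface.
The BLK_RL_* bits, the flags field, and the helper bodies mirror the patch; the
reduced struct and the main() demo are illustrative scaffolding, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Per-request_list "full" bits, as introduced by this patch. */
#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

/*
 * Heavily reduced stand-in for struct request_list: only the fields the
 * full-state helpers and the toy accounting care about.  count[] is
 * indexed by the toy sync flag (0 = async, 1 = sync).
 */
struct request_list {
	int		count[2];	/* requests in flight per sync class */
	unsigned int	flags;		/* BLK_RL_{SYNC,ASYNC}FULL */
};

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

int main(void)
{
	struct request_list rl = { .count = { 0, 0 }, .flags = 0 };

	/* Callers now mark one request_list full, not the whole queue. */
	blk_set_rl_full(&rl, true);
	printf("sync full:  %d\n", blk_rl_full(&rl, true));	/* 1 */
	printf("async full: %d\n", blk_rl_full(&rl, false));	/* 0 */

	blk_clear_rl_full(&rl, true);
	printf("sync full:  %d\n", blk_rl_full(&rl, true));	/* 0 */
	return 0;
}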
@@ -46,7 +46,12 @@ struct blkcg_gq;
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
+#define BLK_RL_SYNCFULL		(1U << 0)
+#define BLK_RL_ASYNCFULL	(1U << 1)
+
 struct request_list {
+	struct request_queue	*q;	/* the queue this rl belongs to */
+
 	/*
 	 * count[], starved[], and wait[] are indexed by
 	 * BLK_RW_SYNC/BLK_RW_ASYNC
@@ -55,6 +60,7 @@ struct request_list {
 	int			starved[2];
 	mempool_t		*rq_pool;
 	wait_queue_head_t	wait[2];
+	unsigned int		flags;
 };
 
 /*
@@ -562,27 +568,25 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	return rl->flags & flag;
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags |= flag;
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags &= ~flag;
 }
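
Tying the last hunk back to the commit-message bullet about __get_request() and
freed_request() taking @rl: building on the toy model sketched after the commit
message (same struct and helpers), a rough illustration of how an alloc/free slow
path would toggle these bits per request_list.  The threshold constant and the
toy_* function names are made up for the sketch; they are not the kernel's.

/*
 * Illustrative only: where the real code consults the queue's congestion
 * thresholds, the toy model just uses a fixed number.
 */
#define TOY_CONGESTION_ON	8

/* Allocation slow path: account against this rl and mark it full. */
static void toy_get_request(struct request_list *rl, bool sync)
{
	if (++rl->count[sync] >= TOY_CONGESTION_ON)
		blk_set_rl_full(rl, sync);
}

/* Free path: clear the per-rl full bit when the last request goes away. */
static void toy_freed_request(struct request_list *rl, bool sync)
{
	if (!--rl->count[sync])
		blk_clear_rl_full(rl, sync);
}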