Merge branch 'for-3.6/core' of git://git.kernel.dk/linux-block
Pull core block IO bits from Jens Axboe:
 "The most complicated part of this is the request allocation rework by
  Tejun, which has been queued up for a long time and has been in
  for-next ditto as well.

  There are a few commits from yesterday and today, mostly trivial and
  obvious fixes.  So I'm pretty confident that it is sound.  It's also
  smaller than usual."

* 'for-3.6/core' of git://git.kernel.dk/linux-block:
  block: remove dead func declaration
  block: add partition resize function to blkpg ioctl
  block: uninitialized ioc->nr_tasks triggers WARN_ON
  block: do not artificially constrain max_sectors for stacking drivers
  blkcg: implement per-blkg request allocation
  block: prepare for multiple request_lists
  block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv
  blkcg: inline bio_blkcg() and friends
  block: allocate io_context upfront
  block: refactor get_request[_wait]()
  block: drop custom queue draining used by scsi_transport_{iscsi|fc}
  mempool: add @gfp_mask to mempool_create_node()
  blkcg: make root blkcg allocation use %GFP_KERNEL
  blkcg: __blkg_lookup_create() doesn't need radix preload
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,16 +46,23 @@ struct blkcg_gq;
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
+#define BLK_RL_SYNCFULL		(1U << 0)
+#define BLK_RL_ASYNCFULL	(1U << 1)
+
 struct request_list {
+	struct request_queue	*q;	/* the queue this rl belongs to */
+#ifdef CONFIG_BLK_CGROUP
+	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
+#endif
 	/*
 	 * count[], starved[], and wait[] are indexed by
 	 * BLK_RW_SYNC/BLK_RW_ASYNC
 	 */
-	int count[2];
-	int starved[2];
-	int elvpriv;
-	mempool_t *rq_pool;
-	wait_queue_head_t wait[2];
+	int			count[2];
+	int			starved[2];
+	mempool_t		*rq_pool;
+	wait_queue_head_t	wait[2];
+	unsigned int		flags;
 };
 
 /*
@@ -138,6 +145,7 @@ struct request {
 	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
+	struct request_list *rl;		/* rl this rq is alloced from */
 	unsigned long long start_time_ns;
 	unsigned long long io_start_time_ns;	/* when passed to hardware */
 #endif
@@ -282,11 +290,16 @@ struct request_queue {
 	struct list_head	queue_head;
 	struct request		*last_merge;
 	struct elevator_queue	*elevator;
+	int			nr_rqs[2];	/* # allocated [a]sync rqs */
+	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
 	/*
-	 * the queue request freelist, one for reads and one for writes
+	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
+	 * is used, root blkg allocates from @q->root_rl and all other
+	 * blkgs from their own blkg->rl. Which one to use should be
+	 * determined using bio_request_list().
 	 */
-	struct request_list	rq;
+	struct request_list	root_rl;
 
 	request_fn_proc		*request_fn;
 	make_request_fn		*make_request_fn;
@@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq)
 	return rw_is_sync(rq->cmd_flags);
 }
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	return rl->flags & flag;
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags |= flag;
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 {
-	if (sync)
-		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+	rl->flags &= ~flag;
 }
 
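The helpers above make "queue full" a property of an individual request_list rather than of the whole queue, so each blkg-owned list can throttle independently. A minimal sketch of how an allocation path might drive them, assuming only what blkdev.h exports; the function name and the use of the queue's congestion thresholds here are illustrative, not the verbatim block/blk-core.c logic:

#include <linux/blkdev.h>

/* Illustrative only: mark one direction (sync/async) of a request_list
 * full or not-full around the queue's congestion thresholds. */
static void example_update_rl_fullness(struct request_queue *q,
				       struct request_list *rl, bool sync)
{
	if (rl->count[sync] + 1 >= q->nr_congestion_on)
		blk_set_rl_full(rl, sync);	/* new allocators should back off */
	else if (rl->count[sync] < q->nr_congestion_off)
		blk_clear_rl_full(rl, sync);	/* this list has room again */
}
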
--- a/include/linux/blkpg.h
+++ b/include/linux/blkpg.h
@@ -40,6 +40,7 @@ struct blkpg_ioctl_arg {
 /* The subfunctions (for the op field) */
 #define BLKPG_ADD_PARTITION	1
 #define BLKPG_DEL_PARTITION	2
+#define BLKPG_RESIZE_PARTITION	3
 
 /* Sizes of name fields. Unused at present. */
 #define BLKPG_DEVNAMELTH	64
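The new subfunction is driven from userspace through the existing BLKPG ioctl on the whole-disk device. A hedged, self-contained sketch; the device path, offsets and sizes are placeholders, offsets are given in bytes, and the start must match the partition's current start:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>
#include <linux/fs.h>		/* BLKPG */

int main(void)
{
	struct blkpg_partition part = {
		.pno    = 1,			/* partition number to resize */
		.start  = 1048576LL,		/* current start, in bytes */
		.length = 8LL << 30,		/* new length, in bytes */
	};
	struct blkpg_ioctl_arg arg = {
		.op      = BLKPG_RESIZE_PARTITION,
		.datalen = sizeof(part),
		.data    = &part,
	};
	int fd = open("/dev/sdX", O_RDWR);	/* placeholder whole-disk device */

	if (fd < 0 || ioctl(fd, BLKPG, &arg) < 0) {
		perror("BLKPG_RESIZE_PARTITION");
		return 1;
	}
	close(fd);
	return 0;
}
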
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result,
 int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
 		    bsg_job_fn *job_fn, int dd_job_size);
 void bsg_request_fn(struct request_queue *q);
 void bsg_remove_queue(struct request_queue *q);
-void bsg_goose_queue(struct request_queue *q);
 
 #endif
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -97,7 +97,13 @@ struct partition_meta_info {
 
 struct hd_struct {
 	sector_t start_sect;
+	/*
+	 * nr_sects is protected by sequence counter. One might extend a
+	 * partition while IO is happening to it and update of nr_sects
+	 * can be non-atomic on 32bit machines with 64bit sector_t.
+	 */
 	sector_t nr_sects;
+	seqcount_t nr_sects_seq;
 	sector_t alignment_offset;
 	unsigned int discard_alignment;
 	struct device __dev;
@@ -647,6 +653,57 @@ static inline void hd_struct_put(struct hd_struct *part)
 		__delete_partition(part);
 }
 
+/*
+ * Any access of part->nr_sects which is not protected by partition
+ * bd_mutex or gendisk bdev bd_mutex, should be done using this
+ * accessor function.
+ *
+ * Code written along the lines of i_size_read() and i_size_write().
+ * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
+ * on.
+ */
+static inline sector_t part_nr_sects_read(struct hd_struct *part)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+	sector_t nr_sects;
+	unsigned seq;
+	do {
+		seq = read_seqcount_begin(&part->nr_sects_seq);
+		nr_sects = part->nr_sects;
+	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
+	return nr_sects;
+#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+	sector_t nr_sects;
+
+	preempt_disable();
+	nr_sects = part->nr_sects;
+	preempt_enable();
+	return nr_sects;
+#else
+	return part->nr_sects;
+#endif
+}
+
+/*
+ * Should be called with mutex lock held (typically bd_mutex) of partition
+ * to provide mutual exclusion among writers otherwise seqcount might be
+ * left in wrong state leaving the readers spinning infinitely.
+ */
+static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+	write_seqcount_begin(&part->nr_sects_seq);
+	part->nr_sects = size;
+	write_seqcount_end(&part->nr_sects_seq);
+#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+	preempt_disable();
+	part->nr_sects = size;
+	preempt_enable();
+#else
+	part->nr_sects = size;
+#endif
+}
+
 #else /* CONFIG_BLOCK */
 
 static inline void printk_all_partitions(void) { }
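The seqcount only protects readers; writers must already be serialized, typically by the block device's bd_mutex, or the counter can be left odd and readers spin forever. A hedged kernel-side sketch of that contract; the function names are illustrative, not the actual resize path in block/:

#include <linux/fs.h>		/* struct block_device, bd_mutex */
#include <linux/genhd.h>

/* Illustrative writer: update the size under bd_mutex so seqcount
 * write sections never nest or race with each other. */
static void example_set_part_size(struct block_device *bdev,
				  struct hd_struct *part, sector_t new_nr_sects)
{
	mutex_lock(&bdev->bd_mutex);
	part_nr_sects_write(part, new_nr_sects);
	mutex_unlock(&bdev->bd_mutex);
}

/* Illustrative lockless reader: safe against a concurrent resize even on
 * 32-bit SMP, where a 64-bit sector_t store is not atomic. */
static sector_t example_get_part_size(struct hd_struct *part)
{
	return part_nr_sects_read(part);
}
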
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -26,7 +26,8 @@ typedef struct mempool_s {
 extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 			mempool_free_t *free_fn, void *pool_data);
 extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
-			mempool_free_t *free_fn, void *pool_data, int nid);
+			mempool_free_t *free_fn, void *pool_data,
+			gfp_t gfp_mask, int nid);
 
 extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
 extern void mempool_destroy(mempool_t *pool);
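With the extra gfp_mask argument, callers now state explicitly whether the initial element reservation may sleep, instead of relying on an implicit GFP_KERNEL. A hedged sketch of the updated call; the cache name, object size and node are placeholders:

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_pool_init(int node)
{
	example_cache = kmem_cache_create("example_objs", 256, 0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* Reserve four elements up front; GFP_KERNEL means this may sleep. */
	example_pool = mempool_create_node(4, mempool_alloc_slab,
					   mempool_free_slab, example_cache,
					   GFP_KERNEL, node);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}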