libceph: preallocate message data items
Currently message data items are allocated with ceph_msg_data_create()
in setup_request_data() inside send_request(). send_request() has never
been allowed to fail, so each allocation is followed by a BUG_ON:
data = ceph_msg_data_create(...);
BUG_ON(!data);
It's been this way since support for multiple message data items was
added in commit 6644ed7b7e ("libceph: make message data be a pointer") in 3.10.
There is no reason to delay the allocation of message data items until
the last possible moment and we certainly don't need a linked list of
them as they are only ever appended to the end and never erased. Make
ceph_msg_new2() take max_data_items and adapt the rest of the code.
Reported-by: Jerry Lee <leisurelysw24@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
This commit is contained in:
@@ -82,22 +82,6 @@ enum ceph_msg_data_type {
 	CEPH_MSG_DATA_BVECS,	/* data source/destination is a bio_vec array */
 };
 
-static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
-{
-	switch (type) {
-	case CEPH_MSG_DATA_NONE:
-	case CEPH_MSG_DATA_PAGES:
-	case CEPH_MSG_DATA_PAGELIST:
-#ifdef CONFIG_BLOCK
-	case CEPH_MSG_DATA_BIO:
-#endif /* CONFIG_BLOCK */
-	case CEPH_MSG_DATA_BVECS:
-		return true;
-	default:
-		return false;
-	}
-}
-
 #ifdef CONFIG_BLOCK
 
 struct ceph_bio_iter {
@@ -181,7 +165,6 @@ struct ceph_bvec_iter {
 } while (0)
 
 struct ceph_msg_data {
-	struct list_head	links;	/* ceph_msg->data */
 	enum ceph_msg_data_type	type;
 	union {
 #ifdef CONFIG_BLOCK
@@ -202,7 +185,6 @@ struct ceph_msg_data {
 
 struct ceph_msg_data_cursor {
 	size_t			total_resid;	/* across all data items */
-	struct list_head	*data_head;	/* = &ceph_msg->data */
 
 	struct ceph_msg_data	*data;		/* current data item */
 	size_t			resid;		/* bytes not yet consumed */
@@ -240,7 +222,9 @@ struct ceph_msg {
 	struct ceph_buffer *middle;
 
 	size_t			data_length;
-	struct list_head	data;
+	struct ceph_msg_data	*data;
+	int			num_data_items;
+	int			max_data_items;
 	struct ceph_msg_data_cursor cursor;
 
 	struct ceph_connection *con;
@@ -381,6 +365,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
 void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
 			     struct ceph_bvec_iter *bvec_pos);
 
+struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
+			       gfp_t flags, bool can_fail);
 extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
 				     bool can_fail);
 
@@ -13,14 +13,15 @@ struct ceph_msgpool {
 	mempool_t *pool;
 	int type;		/* preallocated message type */
 	int front_len;		/* preallocated payload size */
+	int max_data_items;
 };
 
-extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
-			     int front_len, int size, bool blocking,
-			     const char *name);
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
+		      int front_len, int max_data_items, int size,
+		      const char *name);
 extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
-extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
-					 int front_len);
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len,
+				  int max_data_items);
 extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
 
 #endif
Reference in New Issue
Block a user