@@ -32,72 +32,31 @@ struct wbuff_holder wbuff;
 
 /**
  * wbuff_get_pool_slot_from_len() - get pool_id from length
+ * @mod: wbuff module reference
  * @len: length of the buffer
  *
  * Return: pool_id
  */
-static uint8_t wbuff_get_pool_slot_from_len(uint16_t len)
+static uint8_t
+wbuff_get_pool_slot_from_len(struct wbuff_module *mod, uint16_t len)
 {
-	if ((len > 0) && (len <= WBUFF_LEN_POOL0))
-		return WBUFF_POOL_0;
-	else if ((len > WBUFF_LEN_POOL0) && (len <= WBUFF_LEN_POOL1))
-		return WBUFF_POOL_1;
-	else if ((len > WBUFF_LEN_POOL1) && (len <= WBUFF_LEN_POOL2))
-		return WBUFF_POOL_2;
-	else
-		return WBUFF_POOL_3;
-}
+	struct wbuff_pool *pool;
+	uint16_t prev_buf_size = 0;
+	int i;
 
-/**
- * wbuff_get_len_from_pool_slot() - get len from pool_id
- * @pool_id: pool ID
- *
- * Return: nbuf length from pool_id
- */
-static uint32_t wbuff_get_len_from_pool_slot(uint16_t pool_id)
-{
-	uint32_t len = 0;
-
-	switch (pool_id) {
-	case 0:
-		len = WBUFF_LEN_POOL0;
-		break;
-	case 1:
-		len = WBUFF_LEN_POOL1;
-		break;
-	case 2:
-		len = WBUFF_LEN_POOL2;
-		break;
-	case 3:
-		len = WBUFF_LEN_POOL3;
-		break;
-	default:
-		len = 0;
-	}
+	for (i = 0; i < WBUFF_MAX_POOLS; i++) {
+		pool = &mod->wbuff_pool[i];
 
-	return len;
-}
+		if (!pool->initialized)
+			continue;
 
-/**
- * wbuff_get_free_mod_slot() - get free module_id
- *
- * Return: module_id
- */
-static uint8_t wbuff_get_free_mod_slot(void)
-{
-	uint8_t module_id = 0;
-
-	for (module_id = 0; module_id < WBUFF_MAX_MODULES; module_id++) {
-		qdf_spin_lock_bh(&wbuff.mod[module_id].lock);
-		if (!wbuff.mod[module_id].registered) {
-			wbuff.mod[module_id].registered = true;
-			qdf_spin_unlock_bh(&wbuff.mod[module_id].lock);
+		if ((len > prev_buf_size) && (len <= pool->buffer_size))
 			break;
-		}
-		qdf_spin_unlock_bh(&wbuff.mod[module_id].lock);
+
+		prev_buf_size = mod->wbuff_pool[i].buffer_size;
 	}
 
-	return module_id;
+	return i;
 }
 
 /**
@@ -108,17 +67,13 @@ static uint8_t wbuff_get_free_mod_slot(void)
  * Return: true if valid wbuff_alloc_request
  *         false if invalid wbuff_alloc_request
  */
-static bool wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req,
-				     uint8_t num)
+static bool
+wbuff_is_valid_alloc_req(struct wbuff_alloc_request *req, uint8_t num)
 {
-	uint16_t psize = 0;
-	uint8_t alloc = 0, pool_id = 0;
-
-	for (alloc = 0; alloc < num; alloc++) {
-		pool_id = req[alloc].slot;
-		psize = req[alloc].size;
-		if ((pool_id > WBUFF_MAX_POOLS - 1) ||
-		    (psize > wbuff_alloc_max[pool_id]))
+	int i;
+
+	for (i = 0; i < num; i++) {
+		if (req[i].pool_id >= WBUFF_MAX_POOLS)
 			return false;
 	}
 
@@ -142,8 +97,7 @@ static qdf_nbuf_t wbuff_prepare_nbuf(uint8_t module_id, uint8_t pool_id,
 	qdf_nbuf_t buf;
 	unsigned long dev_scratch = 0;
 
-	buf = qdf_nbuf_alloc(NULL, roundup(len + reserve, align), reserve,
-			     align, false);
+	buf = qdf_nbuf_alloc(NULL, len, reserve, align, false);
 	if (!buf)
 		return NULL;
 	dev_scratch = module_id;
@@ -184,7 +138,7 @@ QDF_STATUS wbuff_module_init(void)
 		mod = &wbuff.mod[module_id];
 		qdf_spinlock_create(&mod->lock);
 		for (pool_id = 0; pool_id < WBUFF_MAX_POOLS; pool_id++)
-			mod->pool[pool_id] = NULL;
+			mod->wbuff_pool[pool_id].pool = NULL;
 		mod->registered = false;
 	}
 	wbuff.initialized = true;
@@ -213,56 +167,69 @@ QDF_STATUS wbuff_module_deinit(void)
 }
 
 struct wbuff_mod_handle *
-wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num,
-		      int reserve, int align)
+wbuff_module_register(struct wbuff_alloc_request *req, uint8_t num_pools,
+		      int reserve, int align, enum wbuff_module_id module_id)
 {
 	struct wbuff_module *mod = NULL;
+	struct wbuff_pool *wbuff_pool;
 	qdf_nbuf_t buf = NULL;
-	uint32_t len = 0;
-	uint16_t idx = 0, psize = 0;
-	uint8_t alloc = 0, module_id = 0, pool_id = 0;
+	uint32_t len;
+	uint16_t pool_size;
+	uint8_t pool_id;
+	int i;
+	int j;
 
 	if (!wbuff.initialized)
 		return NULL;
 
-	if ((num == 0) || (num > WBUFF_MAX_POOLS))
+	if ((num_pools == 0) || (num_pools > WBUFF_MAX_POOLS))
 		return NULL;
 
-	if (!wbuff_is_valid_alloc_req(req, num))
+	if (module_id >= WBUFF_MAX_MODULES)
 		return NULL;
 
-	module_id = wbuff_get_free_mod_slot();
-	if (module_id == WBUFF_MAX_MODULES)
+	if (!wbuff_is_valid_alloc_req(req, num_pools))
 		return NULL;
 
 	mod = &wbuff.mod[module_id];
+	if (mod->registered)
+		return NULL;
 
 	mod->handle.id = module_id;
 
-	for (alloc = 0; alloc < num; alloc++) {
-		pool_id = req[alloc].slot;
-		psize = req[alloc].size;
-		len = wbuff_get_len_from_pool_slot(pool_id);
+	for (i = 0; i < num_pools; i++) {
+		pool_id = req[i].pool_id;
+		pool_size = req[i].pool_size;
+		len = req[i].buffer_size;
+		wbuff_pool = &mod->wbuff_pool[pool_id];
+
 		/**
-		 * Allocate pool_cnt number of buffers for
+		 * Allocate pool_size number of buffers for
		 * the pool given by pool_id
		 */
-		for (idx = 0; idx < psize; idx++) {
+		for (j = 0; j < pool_size; j++) {
 			buf = wbuff_prepare_nbuf(module_id, pool_id, len,
						 reserve, align);
 			if (!buf)
 				continue;
-			if (!mod->pool[pool_id]) {
+
+			if (!wbuff_pool->pool)
 				qdf_nbuf_set_next(buf, NULL);
-				mod->pool[pool_id] = buf;
-			} else {
-				qdf_nbuf_set_next(buf, mod->pool[pool_id]);
-				mod->pool[pool_id] = buf;
-			}
+			else
+				qdf_nbuf_set_next(buf, wbuff_pool->pool);
+
+			wbuff_pool->pool = buf;
 		}
+
+		wbuff_pool->pool_id = pool_id;
+		wbuff_pool->buffer_size = len;
+		wbuff_pool->initialized = true;
 	}
+
 	mod->reserve = reserve;
 	mod->align = align;
+	mod->registered = true;
+
 
 	return (struct wbuff_mod_handle *)&mod->handle;
 }
@@ -284,7 +251,7 @@ QDF_STATUS wbuff_module_deregister(struct wbuff_mod_handle *hdl)
 
 	qdf_spin_lock_bh(&mod->lock);
 	for (pool_id = 0; pool_id < WBUFF_MAX_POOLS; pool_id++) {
-		first = mod->pool[pool_id];
+		first = mod->wbuff_pool[pool_id].pool;
 		while (first) {
 			buf = first;
 			first = qdf_nbuf_next(buf);
@@ -302,27 +269,33 @@ qdf_nbuf_t wbuff_buff_get(struct wbuff_mod_handle *hdl, uint32_t len,
 {
 	struct wbuff_handle *handle;
 	struct wbuff_module *mod = NULL;
+	struct wbuff_pool *wbuff_pool;
 	uint8_t module_id = 0;
 	uint8_t pool_id = 0;
 	qdf_nbuf_t buf = NULL;
 
 	handle = (struct wbuff_handle *)hdl;
 
-	if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len ||
-	    (len > WBUFF_MAX_BUFFER_SIZE))
+	if ((!wbuff.initialized) || (!wbuff_is_valid_handle(handle)) || !len)
 		return NULL;
 
 	module_id = handle->id;
-	pool_id = wbuff_get_pool_slot_from_len(len);
 	mod = &wbuff.mod[module_id];
 
+	pool_id = wbuff_get_pool_slot_from_len(mod, len);
+	if (pool_id == WBUFF_MAX_POOLS)
+		return NULL;
+
+	wbuff_pool = &mod->wbuff_pool[pool_id];
+
 	qdf_spin_lock_bh(&mod->lock);
-	if (mod->pool[pool_id]) {
-		buf = mod->pool[pool_id];
-		mod->pool[pool_id] = qdf_nbuf_next(buf);
+	if (wbuff_pool->pool) {
+		buf = wbuff_pool->pool;
+		wbuff_pool->pool = qdf_nbuf_next(buf);
 		mod->pending_returns++;
 	}
 	qdf_spin_unlock_bh(&mod->lock);
+
 	if (buf) {
 		qdf_nbuf_set_next(buf, NULL);
 		qdf_net_buf_debug_update_node(buf, func_name, line_num);
@@ -336,6 +309,7 @@ qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf)
 	qdf_nbuf_t buffer = buf;
 	unsigned long pool_info = 0;
 	uint8_t module_id = 0, pool_id = 0;
+	struct wbuff_pool *wbuff_pool;
 
 	if (!wbuff.initialized)
 		return buffer;
@@ -351,13 +325,15 @@ qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf)
 	if (module_id >= WBUFF_MAX_MODULES || pool_id >= WBUFF_MAX_POOLS)
 		return NULL;
 
+	wbuff_pool = &wbuff.mod[module_id].wbuff_pool[pool_id];
+
 	qdf_nbuf_reset(buffer, wbuff.mod[module_id].reserve,
		       wbuff.mod[module_id].align);
 
 	qdf_spin_lock_bh(&wbuff.mod[module_id].lock);
 	if (wbuff.mod[module_id].registered) {
-		qdf_nbuf_set_next(buffer, wbuff.mod[module_id].pool[pool_id]);
-		wbuff.mod[module_id].pool[pool_id] = buffer;
+		qdf_nbuf_set_next(buffer, wbuff_pool->pool);
+		wbuff_pool->pool = buffer;
 		wbuff.mod[module_id].pending_returns--;
 		buffer = NULL;
 	}