qcacmn: Use multi-page alloc for tso descs

Regular allocations are usually rounded up to a slab-size boundary,
consuming more memory than was requested. Use multi-page allocation
instead to avoid allocating more memory than requested.

Change-Id: Id6562f2fe419179e27206ff5b1d543090b931607
CRs-Fixed: 2294213
This commit is contained in:
Kiran Venkatappa
2018-08-06 22:08:36 +05:30
committed by nshrivas
parent df19d48862
commit 08bf93bbaf
3 changed files with 81 additions and 119 deletions

View File

@@ -323,53 +323,46 @@ QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem) uint16_t num_elem)
{ {
int i; struct dp_tx_tso_seg_pool_s *tso_desc_pool;
struct qdf_tso_seg_elem_t *c_element; uint32_t desc_size;
struct qdf_tso_seg_elem_t *temp;
soc->tx_tso_desc[pool_id].num_free = 0; tso_desc_pool = &soc->tx_tso_desc[pool_id];
c_element = qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t)); tso_desc_pool->num_free = 0;
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
qdf_mem_multi_pages_alloc(soc->osdev,
&tso_desc_pool->desc_pages,
desc_size,
num_elem, 0, true);
if (!c_element) { if (!tso_desc_pool->desc_pages.num_pages) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("Alloc Failed %pK pool_id %d"), FL("Alloc Failed %pK pool_id %d"),
soc, pool_id); soc, pool_id);
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
soc->tx_tso_desc[pool_id].freelist = c_element; tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
soc->tx_tso_desc[pool_id].num_free++; *tso_desc_pool->desc_pages.cacheable_pages;
for (i = 0; i < (num_elem - 1); i++) { tso_desc_pool->num_free = num_elem;
c_element->next = if (qdf_mem_multi_page_link(soc->osdev,
qdf_mem_malloc(sizeof(struct qdf_tso_seg_elem_t)); &tso_desc_pool->desc_pages,
if (!c_element->next) { desc_size,
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, num_elem, true)) {
FL("Alloc Failed %pK pool_id %d"), QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
soc, pool_id); "invalid tso desc allocation - overflow num link");
goto fail; goto free_tso_desc;
}
soc->tx_tso_desc[pool_id].num_free++;
c_element = c_element->next;
c_element->next = NULL;
} }
TSO_DEBUG("Number of free descriptors: %u\n", TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free);
soc->tx_tso_desc[pool_id].num_free); tso_desc_pool->pool_size = num_elem;
soc->tx_tso_desc[pool_id].pool_size = num_elem; qdf_spinlock_create(&tso_desc_pool->lock);
qdf_spinlock_create(&soc->tx_tso_desc[pool_id].lock);
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
fail: free_tso_desc:
c_element = soc->tx_tso_desc[pool_id].freelist; qdf_mem_multi_pages_free(soc->osdev,
while (c_element) { &tso_desc_pool->desc_pages, 0, true);
temp = c_element->next;
qdf_mem_free(c_element);
c_element = temp;
}
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_FAULT;
} }
/** /**
@@ -381,33 +374,19 @@ fail:
*/ */
/**
 * dp_tx_tso_desc_pool_free() - release the TSO segment descriptor pool
 * @soc: handle to DP SoC context
 * @pool_id: index of the pool to tear down
 *
 * Frees the multi-page backing store, resets the pool bookkeeping under
 * the pool lock, and destroys the lock.
 *
 * Return: none
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];

	qdf_spin_lock_bh(&pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &pool->desc_pages, 0, true);

	/* Clear all bookkeeping so a stale freelist can never be walked */
	pool->freelist = NULL;
	pool->num_free = 0;
	pool->pool_size = 0;

	qdf_spin_unlock_bh(&pool->lock);
	qdf_spinlock_destroy(&pool->lock);
}
/** /**
@@ -421,51 +400,45 @@ void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem) uint16_t num_elem)
{ {
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
uint32_t desc_size;
int i; tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
struct qdf_tso_num_seg_elem_t *c_element; tso_num_seg_pool->num_free = 0;
struct qdf_tso_num_seg_elem_t *temp; desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
qdf_mem_multi_pages_alloc(soc->osdev,
soc->tx_tso_num_seg[pool_id].num_free = 0; &tso_num_seg_pool->desc_pages,
c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t)); desc_size,
num_elem, 0, true);
if (!c_element) { if (!tso_num_seg_pool->desc_pages.num_pages) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("Alloc Failed %pK pool_id %d"), FL("Alloc Failed %pK pool_id %d"),
soc, pool_id); soc, pool_id);
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
soc->tx_tso_num_seg[pool_id].freelist = c_element; if (qdf_mem_multi_page_link(soc->osdev,
soc->tx_tso_num_seg[pool_id].num_free++; &tso_num_seg_pool->desc_pages,
for (i = 0; i < (num_elem - 1); i++) { desc_size,
c_element->next = num_elem, true)) {
qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t)); QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"invalid tso desc allocation - overflow num link");
if (!c_element->next) { goto fail;
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("Alloc Failed %pK pool_id %d"),
soc, pool_id);
goto fail;
}
soc->tx_tso_num_seg[pool_id].num_free++;
c_element = c_element->next;
c_element->next = NULL;
} }
soc->tx_tso_num_seg[pool_id].num_seg_pool_size = num_elem; tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
qdf_spinlock_create(&soc->tx_tso_num_seg[pool_id].lock); *tso_num_seg_pool->desc_pages.cacheable_pages;
tso_num_seg_pool->num_free = num_elem;
tso_num_seg_pool->num_seg_pool_size = num_elem;
qdf_spinlock_create(&tso_num_seg_pool->lock);
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
fail: fail:
c_element = soc->tx_tso_num_seg[pool_id].freelist; qdf_mem_multi_pages_free(soc->osdev,
while (c_element) { &tso_num_seg_pool->desc_pages, 0, true);
temp = c_element->next;
qdf_mem_free(c_element);
c_element = temp;
}
return QDF_STATUS_E_NOMEM; return QDF_STATUS_E_NOMEM;
} }
@@ -479,33 +452,18 @@ fail:
*/ */
/**
 * dp_tx_tso_num_seg_pool_free() - release the TSO num-seg descriptor pool
 * @soc: handle to DP SoC context
 * @pool_id: index of the pool to tear down
 *
 * Frees the multi-page backing store, resets the pool bookkeeping under
 * the pool lock, and destroys the lock.
 *
 * Return: none
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *pool = &soc->tx_tso_num_seg[pool_id];

	qdf_spin_lock_bh(&pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
				 &pool->desc_pages, 0, true);

	/* Clear all bookkeeping so a stale freelist can never be walked */
	pool->freelist = NULL;
	pool->num_free = 0;
	pool->num_seg_pool_size = 0;

	qdf_spin_unlock_bh(&pool->lock);
	qdf_spinlock_destroy(&pool->lock);
}

View File

@@ -323,12 +323,14 @@ enum flow_pool_status {
* @pool_size: total number of pool elements * @pool_size: total number of pool elements
* @num_free: free element count * @num_free: free element count
* @freelist: first free element pointer * @freelist: first free element pointer
* @desc_pages: multiple page allocation information for actual descriptors
* @lock: lock for accessing the pool * @lock: lock for accessing the pool
*/ */
struct dp_tx_tso_seg_pool_s {
	uint16_t pool_size;                   /* total number of pool elements */
	uint16_t num_free;                    /* free element count */
	struct qdf_tso_seg_elem_t *freelist;  /* first free element */
	struct qdf_mem_multi_page_t desc_pages; /* multi-page backing store */
	qdf_spinlock_t lock;                  /* protects the pool */
};
@@ -337,6 +339,7 @@ struct dp_tx_tso_seg_pool_s {
* @num_seg_pool_size: total number of pool elements * @num_seg_pool_size: total number of pool elements
* @num_free: free element count * @num_free: free element count
* @freelist: first free element pointer * @freelist: first free element pointer
* @desc_pages: multiple page allocation information for actual descriptors
* @lock: lock for accessing the pool * @lock: lock for accessing the pool
*/ */
@@ -344,6 +347,7 @@ struct dp_tx_tso_num_seg_pool_s {
uint16_t num_seg_pool_size; uint16_t num_seg_pool_size;
uint16_t num_free; uint16_t num_free;
struct qdf_tso_num_seg_elem_t *freelist; struct qdf_tso_num_seg_elem_t *freelist;
struct qdf_mem_multi_page_t desc_pages;
/*tso mutex */ /*tso mutex */
qdf_spinlock_t lock; qdf_spinlock_t lock;
}; };

View File

@@ -1005,16 +1005,16 @@ struct qdf_tso_seg_dbg_t {
/** /**
* qdf_tso_seg_elem_t - tso segment element * qdf_tso_seg_elem_t - tso segment element
* @seg: instance of segment
* @next: pointer to the next segment * @next: pointer to the next segment
* @seg: instance of segment
*/ */
struct qdf_tso_seg_elem_t { struct qdf_tso_seg_elem_t {
struct qdf_tso_seg_elem_t *next;
struct qdf_tso_seg_t seg; struct qdf_tso_seg_t seg;
uint32_t cookie:13, uint32_t cookie:13,
on_freelist:1, on_freelist:1,
sent_to_target:1, sent_to_target:1,
force_free:1; force_free:1;
struct qdf_tso_seg_elem_t *next;
#ifdef TSOSEG_DEBUG #ifdef TSOSEG_DEBUG
struct qdf_tso_seg_dbg_t dbg; struct qdf_tso_seg_dbg_t dbg;
#endif /* TSOSEG_DEBUG */ #endif /* TSOSEG_DEBUG */
@@ -1033,12 +1033,12 @@ struct qdf_tso_num_seg_t {
/** /**
* qdf_tso_num_seg_elem_t - num of tso segment element for jumbo skb * qdf_tso_num_seg_elem_t - num of tso segment element for jumbo skb
* @num_seg: instance of num of seg
* @next: pointer to the next segment * @next: pointer to the next segment
* @num_seg: instance of num of seg
*/ */
struct qdf_tso_num_seg_elem_t { struct qdf_tso_num_seg_elem_t {
struct qdf_tso_num_seg_t num_seg;
struct qdf_tso_num_seg_elem_t *next; struct qdf_tso_num_seg_elem_t *next;
struct qdf_tso_num_seg_t num_seg;
}; };
/** /**