qcacld-3.0: Use ini values for datapath prealloc

The memory allocation for srngs and tx/rx descriptors
is done during prealloc using compile-time macros. This
can result in OOB access if the srng sizes or the number
of tx/rx descriptor elements are increased via ini config,
since the preallocated buffers stay at the macro-defined
sizes.

Fix is to use the ini values to derive the srng sizes and
the number of tx/rx descriptors, so that the correct amount
of memory is allocated during dp prealloc.

Change-Id: Iaeac3833fd8e13df6baa9452a7d7f194b86a8bd3
CRs-Fixed: 3012648
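
For illustration, the following minimal sketch (not taken from this change;
TCL_DATA_RING_SIZE_DEFAULT, struct tcl_entry and num_entries_from_ini are
made-up names) shows the kind of mismatch described above and the shape of
the fix:

/* Illustrative sketch only -- the names here are hypothetical, not the
 * driver's own.
 */
#include <stdint.h>
#include <stdlib.h>

#define TCL_DATA_RING_SIZE_DEFAULT 2048		/* compile-time macro */

struct tcl_entry { uint32_t word[8]; };		/* stand-in for one ring entry */

struct prealloc_slot {
	void *base;
	uint32_t size;
};

/* Before the fix: prealloc is sized from the macro, but the srng is later
 * set up with the ini-configured entry count.  If that count exceeds the
 * macro, ring writes run past 'size' bytes -- the OOB access described
 * above.
 */
static int prealloc_macro_sized(struct prealloc_slot *slot)
{
	slot->size = sizeof(struct tcl_entry) * TCL_DATA_RING_SIZE_DEFAULT;
	slot->base = malloc(slot->size);
	return slot->base ? 0 : -1;
}

/* After the fix: the same ini-derived count also sizes the prealloc. */
static int prealloc_ini_sized(struct prealloc_slot *slot,
			      uint32_t num_entries_from_ini)
{
	slot->size = sizeof(struct tcl_entry) * num_entries_from_ini;
	slot->base = malloc(slot->size);
	return slot->base ? 0 : -1;
}

In the patch below, dp_update_mem_size_by_ring_type() and
dp_update_num_elements_by_desc_type() fill this role, using the values
returned by wlan_cfg_get_prealloc_cfg().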
Author: Yeshwanth Sriram Guntuka
Date: 2021-04-22 14:54:50 +05:30
Committed by: Madan Koyyalamudi
Parent: 2779fc1c8d
Commit: 5308b1abd3
3 changed files with 155 additions and 43 deletions


@@ -340,25 +340,25 @@ static struct dp_consistent_prealloc g_dp_consistent_allocs[] = {
 	{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0, NULL, NULL, 0, 0},
 #endif
 	/* 3 TCL data rings */
-	{TCL_DATA, (sizeof(struct tlv_32_hdr) + sizeof(struct tcl_data_cmd)) * TCL_DATA_RING_SIZE, 0, NULL, NULL, 0, 0},
-	{TCL_DATA, (sizeof(struct tlv_32_hdr) + sizeof(struct tcl_data_cmd)) * TCL_DATA_RING_SIZE, 0, NULL, NULL, 0, 0},
-	{TCL_DATA, (sizeof(struct tlv_32_hdr) + sizeof(struct tcl_data_cmd)) * TCL_DATA_RING_SIZE, 0, NULL, NULL, 0, 0},
+	{TCL_DATA, 0, 0, NULL, NULL, 0, 0},
+	{TCL_DATA, 0, 0, NULL, NULL, 0, 0},
+	{TCL_DATA, 0, 0, NULL, NULL, 0, 0},
 	/* 4 WBM2SW rings */
-	{WBM2SW_RELEASE, (sizeof(struct wbm_release_ring)) * WBM2SW_RELEASE_RING_SIZE, 0, NULL, NULL, 0, 0},
-	{WBM2SW_RELEASE, (sizeof(struct wbm_release_ring)) * WBM2SW_RELEASE_RING_SIZE, 0, NULL, NULL, 0, 0},
-	{WBM2SW_RELEASE, (sizeof(struct wbm_release_ring)) * WBM2SW_RELEASE_RING_SIZE, 0, NULL, NULL, 0, 0},
-	{WBM2SW_RELEASE, (sizeof(struct wbm_release_ring)) * WBM2SW_RELEASE_RING_SIZE, 0, NULL, 0, 0},
+	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
+	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
+	{WBM2SW_RELEASE, 0, 0, NULL, NULL, 0, 0},
+	{WBM2SW_RELEASE, 0, 0, NULL, 0, 0},
 	/* SW2WBM link descriptor return ring */
-	{SW2WBM_RELEASE, (sizeof(struct wbm_release_ring)) * WLAN_CFG_WBM_RELEASE_RING_SIZE, 0, NULL, 0, 0},
+	{SW2WBM_RELEASE, 0, 0, NULL, 0, 0},
 	/* 1 WBM idle link desc ring */
 	{WBM_IDLE_LINK, (sizeof(struct wbm_link_descriptor_ring)) * WBM_IDLE_LINK_RING_SIZE, 0, NULL, NULL, 0, 0},
 	/* 2 RXDMA DST ERR rings */
-	{RXDMA_DST, (sizeof(struct reo_entrance_ring)) * WLAN_CFG_RXDMA_ERR_DST_RING_SIZE, 0, NULL, NULL, 0, 0},
-	{RXDMA_DST, (sizeof(struct reo_entrance_ring)) * WLAN_CFG_RXDMA_ERR_DST_RING_SIZE, 0, NULL, NULL, 0, 0},
+	{RXDMA_DST, 0, 0, NULL, NULL, 0, 0},
+	{RXDMA_DST, 0, 0, NULL, NULL, 0, 0},
 	/* REFILL ring 0 */
 	{RXDMA_BUF, (sizeof(struct wbm_buffer_ring)) * WLAN_CFG_RXDMA_REFILL_RING_SIZE, 0, NULL, NULL, 0, 0},
 	/* REO Exception ring */
-	{REO_EXCEPTION, (sizeof(struct reo_destination_ring)) * WLAN_CFG_REO_EXCEPTION_RING_SIZE, 0, NULL, NULL, 0, 0},
+	{REO_EXCEPTION, 0, 0, NULL, NULL, 0, 0},
 };

 /* Number of HW link descriptors needed (rounded to power of 2) */
@@ -381,34 +381,46 @@ static struct dp_consistent_prealloc g_dp_consistent_allocs[] = {
 static struct dp_multi_page_prealloc g_dp_multi_page_allocs[] = {
 	/* 4 TX DESC pools */
-	{DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+	{DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
+	{DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
+	{DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
+	{DP_TX_DESC_TYPE, TX_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
 	/* 4 Tx EXT DESC NON Cacheable pools */
-	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
-	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
-	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
-	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
+	 NON_CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
+	 NON_CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
+	 NON_CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, 0, 0,
+	 NON_CACHEABLE, { 0 } },
 	/* 4 Tx EXT DESC Link Cacheable pools */
-	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0, 0,
+	 CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0, 0,
+	 CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0, 0,
+	 CACHEABLE, { 0 } },
+	{DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), 0, 0,
+	 CACHEABLE, { 0 } },
 	/* 4 TX TSO DESC pools */
-	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
+	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
+	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
+	{DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, 0, 0, CACHEABLE, { 0 } },
 	/* 4 TX TSO NUM SEG DESC pools */
-	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
-	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
+	 CACHEABLE, { 0 } },
+	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
+	 CACHEABLE, { 0 } },
+	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
+	 CACHEABLE, { 0 } },
+	{DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, 0, 0,
+	 CACHEABLE, { 0 } },
 	/* DP RX DESCs BUF pools */
 	{DP_RX_DESC_BUF_TYPE, sizeof(union dp_rx_desc_list_elem_t),
@@ -538,7 +550,94 @@ void dp_prealloc_deinit(void)
 	}
 }
 
-QDF_STATUS dp_prealloc_init(void)
+#ifdef CONFIG_BERYLLIUM
+/**
+ * dp_get_tcl_data_srng_entrysize() - Get the tcl data srng entry
+ * size
+ *
+ * Return: TCL data srng entry size
+ */
+static inline uint32_t dp_get_tcl_data_srng_entrysize(void)
+{
+	return sizeof(struct tcl_data_cmd);
+}
+#else
+static inline uint32_t dp_get_tcl_data_srng_entrysize(void)
+{
+	return (sizeof(struct tlv_32_hdr) + sizeof(struct tcl_data_cmd));
+}
+#endif
+
+/**
+ * dp_update_mem_size_by_ring_type() - Update srng memory size based
+ * on ring type and the corresponding ini configuration
+ * @cfg: prealloc related cfg params
+ * @ring_type: srng type
+ * @mem_size: memory size to be updated
+ *
+ * Return: None
+ */
+static void
+dp_update_mem_size_by_ring_type(struct wlan_dp_prealloc_cfg *cfg,
+				enum hal_ring_type ring_type,
+				uint32_t *mem_size)
+{
+	switch (ring_type) {
+	case TCL_DATA:
+		*mem_size = dp_get_tcl_data_srng_entrysize() *
+			    cfg->num_tx_ring_entries;
+		return;
+	case WBM2SW_RELEASE:
+		*mem_size = (sizeof(struct wbm_release_ring)) *
+			    cfg->num_tx_comp_ring_entries;
+		return;
+	case SW2WBM_RELEASE:
+		*mem_size = (sizeof(struct wbm_release_ring)) *
+			    cfg->num_wbm_rel_ring_entries;
+		return;
+	case RXDMA_DST:
+		*mem_size = (sizeof(struct reo_entrance_ring)) *
+			    cfg->num_rxdma_err_dst_ring_entries;
+		return;
+	case REO_EXCEPTION:
+		*mem_size = (sizeof(struct reo_destination_ring)) *
+			    cfg->num_reo_exception_ring_entries;
+		return;
+	default:
+		return;
+	}
+}
+
+/**
+ * dp_update_num_elements_by_desc_type() - Update num of descriptors based
+ * on type and the corresponding ini configuration
+ * @cfg: prealloc related cfg params
+ * @desc_type: descriptor type
+ * @num_elements: num of descriptor elements
+ *
+ * Return: None
+ */
+static void
+dp_update_num_elements_by_desc_type(struct wlan_dp_prealloc_cfg *cfg,
+				    enum dp_desc_type desc_type,
+				    uint16_t *num_elements)
+{
+	switch (desc_type) {
+	case DP_TX_DESC_TYPE:
+		*num_elements = cfg->num_tx_desc;
+		return;
+	case DP_TX_EXT_DESC_TYPE:
+	case DP_TX_EXT_DESC_LINK_TYPE:
+	case DP_TX_TSO_DESC_TYPE:
+	case DP_TX_TSO_NUM_SEG_TYPE:
+		*num_elements = cfg->num_tx_ext_desc;
+		return;
+	default:
+		return;
+	}
+}
+
+QDF_STATUS dp_prealloc_init(struct cdp_ctrl_objmgr_psoc *ctrl_psoc)
 {
 	int i;
 	struct dp_prealloc_context *cp;
@@ -546,12 +645,15 @@ QDF_STATUS dp_prealloc_init(void)
 	struct dp_multi_page_prealloc *mp;
 	struct dp_consistent_prealloc_unaligned *up;
 	qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
+	struct wlan_dp_prealloc_cfg cfg;
 
-	if (!qdf_ctx) {
+	if (!qdf_ctx || !ctrl_psoc) {
 		QDF_BUG(0);
 		return QDF_STATUS_E_FAILURE;
 	}
 
+	wlan_cfg_get_prealloc_cfg(ctrl_psoc, &cfg);
+
 	/*Context pre-alloc*/
 	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_context_allocs); i++) {
 		cp = &g_dp_context_allocs[i];
@@ -572,6 +674,7 @@ QDF_STATUS dp_prealloc_init(void)
 	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_consistent_allocs); i++) {
 		p = &g_dp_consistent_allocs[i];
 		p->in_use = 0;
+		dp_update_mem_size_by_ring_type(&cfg, p->ring_type, &p->size);
 		p->va_aligned =
 			qdf_aligned_mem_alloc_consistent(qdf_ctx,
 							 &p->size,
@@ -596,6 +699,8 @@ QDF_STATUS dp_prealloc_init(void)
 	for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
 		mp = &g_dp_multi_page_allocs[i];
 		mp->in_use = false;
+		dp_update_num_elements_by_desc_type(&cfg, mp->desc_type,
+						    &mp->element_num);
 		qdf_mem_multi_pages_alloc(qdf_ctx, &mp->pages,
 					  mp->element_size,
 					  mp->element_num,


@@ -482,10 +482,11 @@ int dp_rx_tm_get_pending(ol_txrx_soc_handle soc);
 #ifdef DP_MEM_PRE_ALLOC
 /**
  * dp_prealloc_init() - Pre-allocate DP memory
+ * @ctrl_psoc: objmgr psoc
  *
  * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
  */
-QDF_STATUS dp_prealloc_init(void);
+QDF_STATUS dp_prealloc_init(struct cdp_ctrl_objmgr_psoc *ctrl_psoc);
 
 /**
  * dp_prealloc_deinit() - Free pre-alloced DP memory
@@ -601,7 +602,11 @@ void *dp_prealloc_get_consistent_mem_unaligned(size_t size,
 void dp_prealloc_put_consistent_mem_unaligned(void *va_unaligned);
 
 #else
-static inline QDF_STATUS dp_prealloc_init(void) { return QDF_STATUS_SUCCESS; }
+static inline
+QDF_STATUS dp_prealloc_init(struct cdp_ctrl_objmgr_psoc *ctrl_psoc)
+{
+	return QDF_STATUS_SUCCESS;
+}
 
 static inline void dp_prealloc_deinit(void) { }


@@ -608,19 +608,19 @@ static int __hdd_soc_probe(struct device *dev,
 	if (errno)
 		goto unlock;
 
-	status = dp_prealloc_init();
-	if (status != QDF_STATUS_SUCCESS) {
-		errno = qdf_status_to_os_return(status);
-		goto unlock;
-	}
-
 	hdd_ctx = hdd_context_create(dev);
 	if (IS_ERR(hdd_ctx)) {
 		errno = PTR_ERR(hdd_ctx);
 		goto assert_fail_count;
 	}
 
+	status = dp_prealloc_init((struct cdp_ctrl_objmgr_psoc *)hdd_ctx->psoc);
+	if (status != QDF_STATUS_SUCCESS) {
+		errno = qdf_status_to_os_return(status);
+		goto dp_prealloc_fail;
+	}
+
 	errno = hdd_wlan_startup(hdd_ctx);
 	if (errno)
 		goto hdd_context_destroy;
@@ -645,10 +645,12 @@ wlan_exit:
 	hdd_wlan_exit(hdd_ctx);
 
 hdd_context_destroy:
+	dp_prealloc_deinit();
+
+dp_prealloc_fail:
 	hdd_context_destroy(hdd_ctx);
 
 assert_fail_count:
-	dp_prealloc_deinit();
 	probe_fail_cnt++;
 	hdd_err("consecutive probe failures:%u", probe_fail_cnt);
 	QDF_BUG(probe_fail_cnt < SSR_MAX_FAIL_CNT);