@@ -18,9 +18,13 @@

#include <wlan_objmgr_pdev_obj.h>
#include <dp_txrx.h>
+#include <dp_types.h>
#include <dp_internal.h>
#include <cdp_txrx_cmn.h>
#include <cdp_txrx_misc.h>
+#include <dp_tx_desc.h>
+#include <dp_rx.h>
+

QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, uint8_t pdev_id,
                        struct dp_txrx_config *config)
@@ -146,6 +150,9 @@ int dp_rx_tm_get_pending(ol_txrx_soc_handle soc)
/* Num elements in WBM Idle Link */
#define WBM_IDLE_LINK_RING_SIZE (32 * 1024)

+/* Num TX desc in TX desc pool */
+#define DP_TX_DESC_POOL_SIZE 4096
+
/**
 * struct dp_consistent_prealloc - element representing DP pre-alloc memory
 * @ring_type: HAL ring type
@@ -167,6 +174,25 @@ struct dp_consistent_prealloc {
        qdf_dma_addr_t pa_aligned;
};

+/**
+ * struct dp_multi_page_prealloc - element representing DP pre-alloc
+ *                                 multi-page memory
+ * @desc_type: source descriptor type for memory allocation
+ * @element_size: size of a single element
+ * @element_num: total number of elements to be allocated
+ * @in_use: whether this element is in use (occupied)
+ * @cacheable: true for cacheable memory, false for coherent (DMA) memory
+ * @pages: multi-page information storage
+ */
+struct dp_multi_page_prealloc {
+        enum dp_desc_type desc_type;
+        size_t element_size;
+        uint16_t element_num;
+        bool in_use;
+        bool cacheable;
+        struct qdf_mem_multi_page_t pages;
+};
+
static struct dp_consistent_prealloc g_dp_consistent_allocs[] = {
/* 5 REO DST rings */
{REO_DST, (sizeof(struct reo_destination_ring)) * REO_DST_RING_SIZE, 0, NULL, NULL, 0, 0},
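
The struct added above is filled in positionally by the g_dp_multi_page_allocs table introduced in the next hunk. As a reading aid only (not part of the patch, and the variable name is made up), one of those table entries is equivalent to the following designated-initializer form:

/* Illustration only -- equivalent to {DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } } */
static const struct dp_multi_page_prealloc example_tx_pool = {
        .desc_type    = DP_TX_DESC_TYPE,      /* which DP descriptor pool this entry backs */
        .element_size = TX_DESC_SIZE,         /* 128 bytes per TX descriptor */
        .element_num  = DP_TX_DESC_POOL_SIZE, /* 4096 descriptors per pool */
        .in_use       = false,                /* claimed at get time, released at put time */
        .cacheable    = CACHEABLE,            /* cacheable rather than coherent pages */
        .pages        = { 0 },                /* filled by qdf_mem_multi_pages_alloc() in dp_prealloc_init() */
};
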
@@ -195,10 +221,78 @@ static struct dp_consistent_prealloc g_dp_consistent_allocs[] = {

};

+/* Number of HW link descriptors needed (rounded to power of 2) */
+#define NUM_HW_LINK_DESCS (32 * 1024)
+
+/* Size in bytes of HW LINK DESC */
+#define HW_LINK_DESC_SIZE 128
+
+/* Size in bytes of TX Desc (rounded to power of 2) */
+#define TX_DESC_SIZE 128
+
+/* Size in bytes of TX TSO Desc (rounded to power of 2) */
+#define TX_TSO_DESC_SIZE 256
+
+/* Size in bytes of TX TSO Num Seg Desc (rounded to power of 2) */
+#define TX_TSO_NUM_SEG_DESC_SIZE 16
+
+#define NON_CACHEABLE 0
+#define CACHEABLE 1
+
+static struct dp_multi_page_prealloc g_dp_multi_page_allocs[] = {
+        /* 4 TX DESC pools */
+        {DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_DESC_TYPE, TX_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+
+        /* 4 Tx EXT DESC NON Cacheable pools */
+        {DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
+        {DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
+        {DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
+        {DP_TX_EXT_DESC_TYPE, HAL_TX_EXT_DESC_WITH_META_DATA, DP_TX_DESC_POOL_SIZE, 0, NON_CACHEABLE, { 0 } },
+
+        /* 4 Tx EXT DESC Link Cacheable pools */
+        {DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_EXT_DESC_LINK_TYPE, sizeof(struct dp_tx_ext_desc_elem_s), DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+
+        /* 4 TX TSO DESC pools */
+        {DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_TSO_DESC_TYPE, TX_TSO_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+
+        /* 4 TX TSO NUM SEG DESC pools */
+        {DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+        {DP_TX_TSO_NUM_SEG_TYPE, TX_TSO_NUM_SEG_DESC_SIZE, DP_TX_DESC_POOL_SIZE, 0, CACHEABLE, { 0 } },
+
+        /* DP RX DESCs pools */
+        {DP_RX_DESC_BUF_TYPE, sizeof(union dp_rx_desc_list_elem_t),
+         WLAN_CFG_RX_SW_DESC_WEIGHT_SIZE * WLAN_CFG_RXDMA_REFILL_RING_SIZE, 0, CACHEABLE, { 0 } },
+
+#ifdef DISABLE_MON_CONFIG
+        /* no op */
+#else
+        /* 2 DP RX DESCs Status pools */
+        {DP_RX_DESC_STATUS_TYPE, sizeof(union dp_rx_desc_list_elem_t),
+         WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE + 1, 0, CACHEABLE, { 0 } },
+        {DP_RX_DESC_STATUS_TYPE, sizeof(union dp_rx_desc_list_elem_t),
+         WLAN_CFG_RXDMA_MONITOR_STATUS_RING_SIZE + 1, 0, CACHEABLE, { 0 } },
+#endif
+        /* DP HW Link DESCs pools */
+        {DP_HW_LINK_DESC_TYPE, HW_LINK_DESC_SIZE, NUM_HW_LINK_DESCS, 0, NON_CACHEABLE, { 0 } },
+
+};
+
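
Taken together, the fixed-size pools in this table pin down a predictable amount of memory at driver init. Derived from the constants above (approximate, since qdf_mem_multi_pages_alloc() rounds each pool up to whole pages; the EXT DESC and RX pools are left out because their element sizes come from HAL and wlan_cfg definitions not shown here):

        TX descriptors:        4 pools x 4096 x 128 B  =   2 MiB
        TX TSO descriptors:    4 pools x 4096 x 256 B  =   4 MiB
        TX TSO num-seg desc:   4 pools x 4096 x  16 B  = 256 KiB
        HW link descriptors:   1 pool  x 32768 x 128 B =   4 MiB
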
void dp_prealloc_deinit(void)
{
        int i;
        struct dp_consistent_prealloc *p;
+        struct dp_multi_page_prealloc *mp;
        qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);

        if (!qdf_ctx) {
@@ -210,7 +304,7 @@ void dp_prealloc_deinit(void)
                p = &g_dp_consistent_allocs[i];

                if (p->in_use)
-                        dp_warn("i %d: in use while free", i);
+                        dp_warn("i %d: consistent_mem in use while free", i);

                if (p->va_aligned) {
                        dp_debug("i %d: va aligned %pK pa aligned %llx size %d",
@@ -222,12 +316,31 @@ void dp_prealloc_deinit(void)
                        qdf_mem_zero(p, sizeof(*p));
                }
        }
+
+        for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
+                mp = &g_dp_multi_page_allocs[i];
+
+                if (mp->in_use)
+                        dp_warn("i %d: multi-page mem in use while free", i);
+
+                if (mp->pages.num_pages) {
+                        dp_info("i %d: type %d cacheable_pages %pK dma_pages %pK num_pages %d",
+                                i, mp->desc_type,
+                                mp->pages.cacheable_pages,
+                                mp->pages.dma_pages,
+                                mp->pages.num_pages);
+                        qdf_mem_multi_pages_free(qdf_ctx, &mp->pages,
+                                                 0, mp->cacheable);
+                        qdf_mem_zero(mp, sizeof(*mp));
+                }
+        }
}

QDF_STATUS dp_prealloc_init(void)
{
        int i;
        struct dp_consistent_prealloc *p;
+        struct dp_multi_page_prealloc *mp;
        qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);

        if (!qdf_ctx) {
@@ -257,12 +370,39 @@ QDF_STATUS dp_prealloc_init(void)
        }

        if (i != QDF_ARRAY_SIZE(g_dp_consistent_allocs)) {
-                dp_err("unable to allocate memory!");
-                dp_prealloc_deinit();
-                return QDF_STATUS_E_FAILURE;
+                dp_err("unable to allocate consistent memory!");
+                goto deinit;
+        }
+
+        for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
+                mp = &g_dp_multi_page_allocs[i];
+                mp->in_use = false;
+                qdf_mem_multi_pages_alloc(qdf_ctx, &mp->pages,
+                                          mp->element_size,
+                                          mp->element_num,
+                                          0, mp->cacheable);
+                if (qdf_unlikely(!mp->pages.num_pages)) {
+                        dp_warn("i %d: pre-allocation of %d bytes of multi-page memory failed!",
+                                i, (int)(mp->element_size * mp->element_num));
+                        break;
+                }
+
+                mp->pages.is_mem_prealloc = true;
+                dp_info("i %d: cacheable_pages %pK dma_pages %pK num_pages %d",
+                        i, mp->pages.cacheable_pages,
+                        mp->pages.dma_pages,
+                        mp->pages.num_pages);
+        }
+
+        if (i != QDF_ARRAY_SIZE(g_dp_multi_page_allocs)) {
+                dp_err("unable to allocate multi-page memory!");
+                goto deinit;
        }

        return QDF_STATUS_SUCCESS;
+deinit:
+        dp_prealloc_deinit();
+        return QDF_STATUS_E_FAILURE;
}

void *dp_prealloc_get_coherent(uint32_t *size, void **base_vaddr_unaligned,
@@ -318,4 +458,68 @@ void dp_prealloc_put_coherent(qdf_size_t size, void *vaddr_unligned,
        if (i == QDF_ARRAY_SIZE(g_dp_consistent_allocs))
                dp_err("unable to find vaddr %pK", vaddr_unligned);
}
+
+void dp_prealloc_get_multi_pages(uint32_t desc_type,
+                                 size_t element_size,
+                                 uint16_t element_num,
+                                 struct qdf_mem_multi_page_t *pages,
+                                 bool cacheable)
+{
+        int i;
+        struct dp_multi_page_prealloc *mp;
+
+        for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
+                mp = &g_dp_multi_page_allocs[i];
+
+                if (desc_type == mp->desc_type && !mp->in_use &&
+                    mp->pages.num_pages && element_size == mp->element_size &&
+                    element_num <= mp->element_num) {
+                        mp->in_use = true;
+                        *pages = mp->pages;
+
+ dp_info("i %d: desc_type %d cacheable_pages %pK"
|
|
|
+ "dma_pages %pK num_pages %d",
|
|
|
+                                i, desc_type,
+                                mp->pages.cacheable_pages,
+                                mp->pages.dma_pages,
+                                mp->pages.num_pages);
+                        break;
+                }
+        }
+}
+
+void dp_prealloc_put_multi_pages(uint32_t desc_type,
+                                 struct qdf_mem_multi_page_t *pages)
+{
+        int i;
+        struct dp_multi_page_prealloc *mp;
+        bool mp_found = false;
+
+        for (i = 0; i < QDF_ARRAY_SIZE(g_dp_multi_page_allocs); i++) {
+                mp = &g_dp_multi_page_allocs[i];
+
+                if (desc_type == mp->desc_type) {
+                        /* compare the address field selected by the cacheable flag */
+                        mp_found = mp->cacheable ?
+                                (mp->pages.cacheable_pages ==
+                                 pages->cacheable_pages) :
+                                (mp->pages.dma_pages == pages->dma_pages);
+                        /* match found: release the entry back to the prealloc pool */
+                        if (mp_found) {
+                                dp_info("i %d: desc_type %d returned",
+                                        i, desc_type);
+                                mp->in_use = false;
+                                /* TODO: decide whether the page memory needs to be zeroed here */
+                                break;
+                        }
+                }
+        }
+
+        if (!mp_found)
+                dp_warn("pages %pK not from the prealloc pool: desc_type %d cacheable_pages %pK dma_pages %pK",
+                        pages,
+                        desc_type,
+                        pages->cacheable_pages,
+                        pages->dma_pages);
+}
#endif
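
The two new entry points are intended to replace direct multi-page allocation in the DP descriptor-pool setup paths. A minimal sketch of such a caller, assuming the DP core is wired up to these helpers (the function names below are hypothetical and not part of the patch):

/* Hypothetical caller -- illustration only, not part of the patch. */
static void dp_desc_pages_alloc(qdf_device_t osdev, uint32_t desc_type,
                                size_t elem_size, uint16_t elem_num,
                                struct qdf_mem_multi_page_t *pages,
                                bool cacheable)
{
        qdf_mem_zero(pages, sizeof(*pages));

        /* Try the prealloc pool first: a hit marks the entry in_use and
         * copies its page bookkeeping (with is_mem_prealloc set) into *pages.
         */
        dp_prealloc_get_multi_pages(desc_type, elem_size, elem_num,
                                    pages, cacheable);
        if (pages->num_pages)
                return;

        /* No matching prealloc entry: fall back to a runtime allocation */
        qdf_mem_multi_pages_alloc(osdev, pages, elem_size, elem_num,
                                  0, cacheable);
}

static void dp_desc_pages_free(qdf_device_t osdev, uint32_t desc_type,
                               struct qdf_mem_multi_page_t *pages,
                               bool cacheable)
{
        if (pages->is_mem_prealloc)
                dp_prealloc_put_multi_pages(desc_type, pages);
        else
                qdf_mem_multi_pages_free(osdev, pages, 0, cacheable);

        qdf_mem_zero(pages, sizeof(*pages));
}

The is_mem_prealloc flag set in dp_prealloc_init() is what lets the free path route preallocated pages back to the pool instead of freeing them.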