diff --git a/dp/inc/cdp_txrx_cmn.h b/dp/inc/cdp_txrx_cmn.h index 08e1c97a72..7c8f185963 100644 --- a/dp/inc/cdp_txrx_cmn.h +++ b/dp/inc/cdp_txrx_cmn.h @@ -1210,6 +1210,39 @@ cdp_soc_init(ol_txrx_soc_handle soc, u_int16_t devid, dp_ol_if_ops, devid); } +/** + * cdp_soc_init() - Initialize txrx SOC + * @soc: ol_txrx_soc_handle handle + * @devid: Device ID + * @hif_handle: Opaque HIF handle + * @psoc: Opaque Objmgr handle + * @htc_handle: Opaque HTC handle + * @qdf_dev: QDF device + * @dp_ol_if_ops: Offload Operations + * + * Return: DP SOC handle on success, NULL on failure + */ +static inline QDF_STATUS +cdp_pdev_init(ol_txrx_soc_handle soc, + HTC_HANDLE htc_handle, qdf_device_t qdf_dev, + uint8_t pdev_id) +{ + if (!soc || !soc->ops) { + QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG, + "%s: Invalid Instance:", __func__); + QDF_BUG(0); + return 0; + } + + if (!soc->ops->cmn_drv_ops || + !soc->ops->cmn_drv_ops->txrx_pdev_init) + return 0; + + return soc->ops->cmn_drv_ops->txrx_pdev_init(soc, + htc_handle, qdf_dev, + pdev_id); +} + /** * cdp_soc_deinit() - Deinitialize txrx SOC * @soc: Opaque DP SOC handle diff --git a/dp/inc/cdp_txrx_ops.h b/dp/inc/cdp_txrx_ops.h index ecba0f398e..8c30156760 100644 --- a/dp/inc/cdp_txrx_ops.h +++ b/dp/inc/cdp_txrx_ops.h @@ -353,6 +353,11 @@ struct cdp_cmn_ops { HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops, uint16_t device_id); + QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc, + HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + uint8_t pdev_id); + /** * txrx_tso_soc_attach() - TSO attach handler triggered during * dynamic tso activation @@ -1075,6 +1080,7 @@ struct ol_if_ops { int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle, uint16_t peer_id, uint8_t vdev_id, uint8_t *peer_mac_addr); #endif + int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle); /* TODO: Add any other control path calls required to OL_IF/WMA layer */ }; diff --git a/dp/wifi3.0/dp_htt.c 
b/dp/wifi3.0/dp_htt.c index 6ca763e938..ddac2487b4 100644 --- a/dp/wifi3.0/dp_htt.c +++ b/dp/wifi3.0/dp_htt.c @@ -490,7 +490,7 @@ htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt) * htt_htc_pkt_pool_free() - Free HTC packet pool * @htt_soc: HTT SOC handle */ -static void +void htt_htc_pkt_pool_free(struct htt_soc *soc) { struct dp_htt_htc_pkt_union *pkt, *next; @@ -3925,9 +3925,10 @@ struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle) } if (i != MAX_PDEV_CNT) { for (j = 0; j < i; j++) { - qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt); - qdf_mem_free(htt_soc->pdevid_tt[i].lmac_ttt); + qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt); + qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt); } + qdf_mem_free(htt_soc); return NULL; } diff --git a/dp/wifi3.0/dp_htt.h b/dp/wifi3.0/dp_htt.h index 28c486f95c..4d66b5760e 100644 --- a/dp/wifi3.0/dp_htt.h +++ b/dp/wifi3.0/dp_htt.h @@ -63,6 +63,8 @@ int htt_wbm_event_record(struct htt_logger *h, uint8_t tx_status, #endif +void htt_htc_pkt_pool_free(struct htt_soc *soc); + #define HTT_TX_MUTEX_TYPE qdf_spinlock_t #define HTT_TX_MUTEX_INIT(_mutex) \ diff --git a/dp/wifi3.0/dp_internal.h b/dp/wifi3.0/dp_internal.h index 24c7ef4a9d..02758538b1 100644 --- a/dp/wifi3.0/dp_internal.h +++ b/dp/wifi3.0/dp_internal.h @@ -1514,8 +1514,6 @@ static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id) return 0; } -bool dp_is_soc_reinit(struct dp_soc *soc); - /* * dp_is_subtype_data() - check if the frame subtype is data * diff --git a/dp/wifi3.0/dp_main.c b/dp/wifi3.0/dp_main.c index a206ae03ff..9df0aba480 100644 --- a/dp/wifi3.0/dp_main.c +++ b/dp/wifi3.0/dp_main.c @@ -160,11 +160,46 @@ dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val) } #endif +static void dp_pdev_srng_deinit(struct dp_pdev *pdev); +static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev); +static void dp_pdev_srng_free(struct dp_pdev *pdev); +static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev); + +static 
void dp_soc_srng_deinit(struct dp_soc *soc); +static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc); +static void dp_soc_srng_free(struct dp_soc *soc); +static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc); + +static void dp_soc_cfg_init(struct dp_soc *soc); +static void dp_soc_cfg_attach(struct dp_soc *soc); + +static inline +QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc, + HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + uint8_t pdev_id); +static QDF_STATUS +dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc, + HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + uint8_t pdev_id); + +static QDF_STATUS +dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force); + +static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc); +static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc); + void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, struct hif_opaque_softc *hif_handle); static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force); +static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, + uint8_t pdev_id, + int force); static struct dp_soc * -dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle, +dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, + struct hif_opaque_softc *hif_handle, + HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops, uint16_t device_id); static void dp_pktlogmod_exit(struct dp_pdev *handle); @@ -203,6 +238,7 @@ static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num); #define DP_INTR_POLL_TIMER_MS 5 + /* Generic AST entry aging timer value */ #define DP_AST_AGING_TIMER_DEFAULT_MS 1000 #define DP_MCS_LENGTH (6*MAX_MCS) @@ -1494,28 +1530,6 @@ static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng, return QDF_STATUS_SUCCESS; } -/** - * dp_srng_setup() - Internal function to setup SRNG rings used by data path - * @soc: datapath soc handle - * @srng: srng handle - * @ring_type: ring 
that needs to be configured - * @mac_id: mac number - * @num_entries: Total number of entries for a given ring - * - * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure - */ -static QDF_STATUS dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng, - int ring_type, int ring_num, int mac_id, - uint32_t num_entries, bool cached) -{ - if (!dp_is_soc_reinit(soc)) { - if (dp_srng_alloc(soc, srng, ring_type, num_entries, cached)) - return QDF_STATUS_E_FAILURE; - } - - return dp_srng_init(soc, srng, ring_type, ring_num, mac_id); -} - /* * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path * @soc: DP SOC handle @@ -1539,19 +1553,6 @@ static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng, srng->hal_srng = NULL; } -/** - * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path - * Any buffers allocated and attached to ring entries are expected to be freed - * before calling this function. - */ -static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng, - int ring_type, int ring_num) -{ - dp_srng_deinit(soc, srng, ring_type, ring_num); - - dp_srng_free(soc, srng); -} - /* TODO: Need this interface from HIF */ void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle); @@ -2638,37 +2639,6 @@ void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id) } } -/* - * Allocate and setup link descriptor pool that will be used by HW for - * various link and queue descriptors and managed by WBM - */ -static QDF_STATUS -dp_hw_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id) -{ - if (!dp_is_soc_reinit(soc)) { - if (dp_hw_link_desc_pool_banks_alloc(soc, mac_id)) - return QDF_STATUS_E_FAILURE; - if (dp_hw_link_desc_ring_alloc(soc)) - return QDF_STATUS_E_FAILURE; - } - - if (dp_hw_link_desc_ring_init(soc)) - return QDF_STATUS_E_FAILURE; - - dp_link_desc_ring_replenish(soc, mac_id); - return QDF_STATUS_SUCCESS; -} - -/* - * Free link descriptor pool that was setup HW - */ -static 
void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id) -{ - dp_hw_link_desc_ring_deinit(soc); - dp_hw_link_desc_ring_free(soc); - dp_hw_link_desc_pool_banks_free(soc, mac_id); -} - #ifdef IPA_OFFLOAD #define REO_DST_RING_SIZE_QCA6290 1023 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0 @@ -3161,20 +3131,20 @@ static inline void dp_create_ext_stats_event(struct dp_soc *soc) } #endif -static -QDF_STATUS dp_setup_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index) +static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index) { - int tx_ring_size; - int tx_comp_ring_size; - struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; - int cached; + wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index); - tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx); - dp_ipa_get_tx_ring_size(index, &tx_ring_size); + wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index); +} - if (dp_srng_setup(soc, &soc->tcl_data_ring[index], TCL_DATA, - index, 0, tx_ring_size, 0)) { - dp_err("dp_srng_setup failed for tcl_data_ring"); +static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc, + uint8_t index) +{ + if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) { + dp_err("dp_srng_init failed for tcl_data_ring"); goto fail1; } wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned, @@ -3183,17 +3153,9 @@ QDF_STATUS dp_setup_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index) WLAN_MD_DP_SRNG_TCL_DATA, "tcl_data_ring"); - tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); - dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size); - /* Disable cached desc if NSS offload is enabled */ - cached = WLAN_CFG_DST_RING_CACHED_DESC; - if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) - cached = 0; - - if (dp_srng_setup(soc, &soc->tx_comp_ring[index], - 
WBM2SW_RELEASE, index, 0, tx_comp_ring_size, - cached)) { - dp_err("dp_srng_setup failed for tx_comp_ring"); + if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, + index, 0)) { + dp_err("dp_srng_init failed for tx_comp_ring"); goto fail1; } wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned, @@ -3208,337 +3170,47 @@ fail1: return QDF_STATUS_E_FAILURE; } -/* - * dp_soc_cmn_setup() - Common SoC level initializion - * @soc: Datapath SOC handle - * - * This is an internal function used to setup common SOC data structures, - * to be called from PDEV attach after receiving HW mode capabilities from FW - */ -static int dp_soc_cmn_setup(struct dp_soc *soc) +static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index) +{ + dp_srng_free(soc, &soc->tcl_data_ring[index]); + dp_srng_free(soc, &soc->tx_comp_ring[index]); +} + +static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc, + uint8_t index) { - int i, cached; - struct hal_reo_params reo_params; int tx_ring_size; int tx_comp_ring_size; - int reo_dst_ring_size; - uint32_t entries; - struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; - QDF_STATUS status; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; + int cached = 0; - if (qdf_atomic_read(&soc->cmn_init_done)) - return 0; + tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx); + dp_ipa_get_tx_ring_size(index, &tx_ring_size); - if (dp_hw_link_desc_pool_setup(soc, WLAN_INVALID_PDEV_ID)) - goto fail1; - - soc_cfg_ctx = soc->wlan_cfg_ctx; - - dp_enable_verbose_debug(soc); - - /* Setup SRNG rings */ - /* Common rings */ - entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx); - - if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for wbm_desc_rel_ring")); + if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA, + tx_ring_size, cached)) { + dp_err("dp_srng_alloc failed for 
tcl_data_ring"); goto fail1; } - wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned, - soc->wbm_desc_rel_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_WBM_DESC_REL, - "wbm_desc_rel_ring"); - - soc->num_tcl_data_rings = 0; - /* Tx data rings */ - if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) { - soc->num_tcl_data_rings = - wlan_cfg_num_tcl_data_rings(soc_cfg_ctx); - tx_comp_ring_size = - wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); - tx_ring_size = - wlan_cfg_tx_ring_size(soc_cfg_ctx); - - for (i = 0; i < soc->num_tcl_data_rings; i++) { - status = dp_setup_tx_ring_pair_by_index(soc, i); - if (status != QDF_STATUS_SUCCESS) - goto fail1; - } - if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { - status = dp_setup_tx_ring_pair_by_index(soc, - IPA_TCL_DATA_RING_IDX); - if (status != QDF_STATUS_SUCCESS) - goto fail1; - } - } else { - /* This will be incremented during per pdev ring setup */ - soc->num_tcl_data_rings = 0; - } - - entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx); - if (dp_srng_setup(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0, 0, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for tcl_cmd_credit_ring.")); - goto fail2; - } - wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned, - soc->tcl_cmd_credit_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_TCL_CMD, - "tcl_cmd_credit_ring"); - - if (dp_tx_soc_attach(soc)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_tx_soc_attach failed")); - goto fail1; - } - - entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx); - if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for tcl_status_ring")); - goto fail2; - } - wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned, - soc->tcl_status_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_TCL_STATUS, - "tcl_status_ring"); - - 
reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx); - - /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension - * descriptors - */ - - /* Rx data rings */ - if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) { - soc->num_reo_dest_rings = - wlan_cfg_num_reo_dest_rings(soc_cfg_ctx); - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_INFO, - FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings); - - /* Disable cached desc if NSS offload is enabled */ + tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); + dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size); + /* Enable cached TCL desc if NSS offload is disabled */ + if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) cached = WLAN_CFG_DST_RING_CACHED_DESC; - if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) - cached = 0; - for (i = 0; i < soc->num_reo_dest_rings; i++) { - if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST, - i, 0, reo_dst_ring_size, cached)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "reo_dest_ring [%d]"), i); - goto fail2; - } - wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned, - soc->reo_dest_ring[i].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_REO_DEST, - "reo_dest_ring"); - - } - } else { - /* This will be incremented during per pdev ring setup */ - soc->num_reo_dest_rings = 0; + if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, + tx_comp_ring_size, cached)) { + dp_err("dp_srng_alloc failed for tx_comp_ring"); + goto fail1; } - entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx); - /* LMAC RxDMA to SW Rings configuration */ - if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) { + return QDF_STATUS_SUCCESS; - for (i = 0; i < MAX_RX_MAC_RINGS; i++) { - if (dp_srng_setup(soc, &soc->rxdma_err_dst_ring[i], - RXDMA_DST, 0, i, entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_err_dst_ring")); - goto fail2; - } - 
wlan_minidump_log(soc->rxdma_err_dst_ring[i].base_vaddr_unaligned, - soc->rxdma_err_dst_ring[i].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_ERR_DST, - "rxdma_err_dst"); - - } - } - /* TBD: call dp_rx_init to setup Rx SW descriptors */ - - /* REO reinjection ring */ - entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx); - if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for reo_reinject_ring")); - goto fail2; - } - wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned, - soc->reo_reinject_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_REO_REINJECT, - "reo_reinject_ring"); - - /* Rx release ring */ - if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0, - wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx), - 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for rx_rel_ring")); - goto fail2; - } - - wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned, - soc->rx_rel_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RX_REL, - "reo_release_ring"); - - /* Rx exception ring */ - entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx); - if (dp_srng_setup(soc, &soc->reo_exception_ring, - REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for reo_exception_ring")); - goto fail2; - } - - wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned, - soc->reo_exception_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_REO_EXCEPTION, - "reo_exception_ring"); - - /* REO command and status rings */ - if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0, - wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx), - 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for reo_cmd_ring")); - goto fail2; - } - 
wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned, - soc->reo_cmd_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_REO_CMD, - "reo_cmd_ring"); - - hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng); - TAILQ_INIT(&soc->rx.reo_cmd_list); - qdf_spinlock_create(&soc->rx.reo_cmd_lock); - - if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0, - wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx), - 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for reo_status_ring")); - goto fail2; - } - wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned, - soc->reo_status_ring.alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_REO_STATUS, - "reo_status_ring"); - - /* - * Skip registering hw ring interrupts for WMAC2 on IPQ6018 - * WMAC2 is not there in IPQ6018 platform. - */ - if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) { - dp_soc_disable_mac2_intr_mask(soc); - } - - /* Reset the cpu ring map if radio is NSS offloaded */ - if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) { - dp_soc_reset_cpu_ring_map(soc); - dp_soc_reset_intr_mask(soc); - } - - /* Setup HW REO */ - qdf_mem_zero(&reo_params, sizeof(reo_params)); - - if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) { - - /* - * Reo ring remap is not required if both radios - * are offloaded to NSS - */ - if (!dp_reo_remap_config(soc, - &reo_params.remap1, - &reo_params.remap2)) - goto out; - - reo_params.rx_hash_enabled = true; - } - - /* setup the global rx defrag waitlist */ - TAILQ_INIT(&soc->rx.defrag.waitlist); - soc->rx.defrag.timeout_ms = - wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx); - soc->rx.defrag.next_flush_ms = 0; - soc->rx.flags.defrag_timeout_check = - wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx); - qdf_spinlock_create(&soc->rx.defrag.defrag_lock); - - dp_create_ext_stats_event(soc); -out: - /* - * set the fragment destination ring - */ - dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring); - - hal_reo_setup(soc->hal_soc, 
&reo_params); - - hal_reo_set_err_dst_remap(soc->hal_soc); - - qdf_atomic_set(&soc->cmn_init_done, 1); - - dp_soc_wds_attach(soc); - - qdf_nbuf_queue_init(&soc->htt_stats.msg); - return 0; -fail2: - dp_tx_soc_detach(soc); fail1: - /* - * Cleanup will be done as part of soc_detach, which will - * be called on pdev attach failure - */ return QDF_STATUS_E_FAILURE; } -/* - * dp_soc_cmn_cleanup() - Common SoC level De-initializion - * - * @soc: Datapath SOC handle - * - * This function is responsible for cleaning up DP resource of Soc - * initialled in dp_pdev_attach_wifi3-->dp_soc_cmn_setup, since - * dp_soc_detach_wifi3 could not identify some of them - * whether they have done initialized or not accurately. - * - */ -static void dp_soc_cmn_cleanup(struct dp_soc *soc) -{ - if (!dp_is_soc_reinit(soc)) { - dp_tx_soc_detach(soc); - } - - qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock); - qdf_spinlock_destroy(&soc->rx.reo_cmd_lock); -} - -static QDF_STATUS -dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, - int force); - static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev) { struct cdp_lro_hash_config lro_hash; @@ -3563,11 +3235,11 @@ static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev) } qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4, - (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * - LRO_IPV4_SEED_ARR_SZ)); + (sizeof(lro_hash.toeplitz_hash_ipv4[0]) * + LRO_IPV4_SEED_ARR_SZ)); qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6, - (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * - LRO_IPV6_SEED_ARR_SZ)); + (sizeof(lro_hash.toeplitz_hash_ipv6[0]) * + LRO_IPV6_SEED_ARR_SZ)); qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config); @@ -3605,15 +3277,14 @@ static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev) } /* -* dp_rxdma_ring_setup() - configure the RX DMA rings -* @soc: data path SoC handle -* @pdev: Physical device handle -* -* Return: 0 - success, > 0 - failure -*/ + * dp_rxdma_ring_setup() - 
configure the RX DMA rings + * @soc: data path SoC handle + * @pdev: Physical device handle + * + * Return: 0 - success, > 0 - failure + */ #ifdef QCA_HOST2FW_RXBUF_RING -static int dp_rxdma_ring_setup(struct dp_soc *soc, - struct dp_pdev *pdev) +static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev) { struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; int max_mac_rings; @@ -3626,19 +3297,26 @@ static int dp_rxdma_ring_setup(struct dp_soc *soc, for (i = 0; i < max_mac_rings; i++) { dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i); - if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i], - RXDMA_BUF, 1, i, ring_size, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL("failed rx mac ring setup")); + if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i], + RXDMA_BUF, ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("failed rx mac ring setup")); + return QDF_STATUS_E_FAILURE; + } + + if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i], + RXDMA_BUF, 1, i)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("failed rx mac ring setup")); + + dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]); return QDF_STATUS_E_FAILURE; } } return QDF_STATUS_SUCCESS; } #else -static int dp_rxdma_ring_setup(struct dp_soc *soc, - struct dp_pdev *pdev) +static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev) { return QDF_STATUS_SUCCESS; } @@ -3710,13 +3388,20 @@ static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); /* Setup second Rx refill buffer ring */ - if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, - IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0) - ) { + if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, + entries, 0)) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, FL("dp_srng_setup failed second rx refill ring")); return QDF_STATUS_E_FAILURE; } + + if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, 
RXDMA_BUF, + IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed second rx refill ring")); + return QDF_STATUS_E_FAILURE; + } + return QDF_STATUS_SUCCESS; } @@ -3730,8 +3415,8 @@ static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc, static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc, struct dp_pdev *pdev) { - dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, - IPA_RX_REFILL_BUF_RING_IDX); + dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0); + dp_srng_free(soc, &pdev->rx_refill_buf_ring2); } #else @@ -3749,119 +3434,200 @@ static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc, #if !defined(DISABLE_MON_CONFIG) /** - * dp_mon_rings_setup() - Initialize Monitor rings based on target - * @soc: soc handle - * @pdev: physical device handle + * dp_mon_ring_deinit() - Deinitialize monitor rings + * @pdev: DP pdev handle * - * Return: nonzero on failure and zero on success */ -static -QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev) +static void dp_mon_rings_deinit(struct dp_pdev *pdev) +{ + int mac_id = 0; + struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + struct dp_soc *soc = pdev->soc; + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, + pdev->pdev_id); + + dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id], + RXDMA_MONITOR_STATUS, 0); + + if (!soc->wlan_cfg_ctx->rxdma1_enable) + continue; + + dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id], + RXDMA_MONITOR_BUF, 0); + dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id], + RXDMA_MONITOR_DST, 0); + dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id], + RXDMA_MONITOR_DESC, 0); + } +} + +/** + * dp_mon_rings_free() - free monitor rings + * @pdev: Datapath pdev handle + * + */ +static void dp_mon_rings_free(struct dp_pdev *pdev) +{ + int mac_id = 0; + 
struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + struct dp_soc *soc = pdev->soc; + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, + pdev->pdev_id); + + dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]); + + if (!soc->wlan_cfg_ctx->rxdma1_enable) + continue; + + dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]); + dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]); + dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]); + } +} + +/** + * dp_mon_rings_init() - Initialize monitor srng rings + * @pdev: Datapath pdev handle + * + * return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_NOMEM on failure + */ +static +QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev) +{ + int mac_id = 0; + struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; + + pdev_cfg_ctx = pdev->wlan_cfg_ctx; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, + pdev->pdev_id); + + if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id], + RXDMA_MONITOR_STATUS, 0, lmac_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_status_ring")); + goto fail1; + } + + if (!soc->wlan_cfg_ctx->rxdma1_enable) + continue; + + if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id], + RXDMA_MONITOR_BUF, 0, lmac_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_buf_ring ")); + goto fail1; + } + + if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id], + RXDMA_MONITOR_DST, 0, lmac_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_dst_ring")); + goto fail1; + } + + if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id], + RXDMA_MONITOR_DESC, 0, lmac_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_desc_ring")); + goto fail1; + } + } + return QDF_STATUS_SUCCESS; + +fail1: + 
dp_mon_rings_deinit(pdev); + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_mon_rings_alloc() - Allocate memory for monitor srng rings + * @soc: Datapath soc handle + * @pdev: Datapath pdev handle + * + * return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_NOMEM on failure + */ +static +QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev) { int mac_id = 0; - int pdev_id = pdev->pdev_id; int entries; struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx; pdev_cfg_ctx = pdev->wlan_cfg_ctx; for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + int lmac_id = + dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); + entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx); + if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id], + RXDMA_MONITOR_STATUS, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_status_ring")); + goto fail1; + } - if (soc->wlan_cfg_ctx->rxdma1_enable) { - entries = - wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx); - if (dp_srng_setup(soc, - &soc->rxdma_mon_buf_ring[lmac_id], - RXDMA_MONITOR_BUF, 0, lmac_id, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_mon_buf_ring ")); - return QDF_STATUS_E_NOMEM; - } - wlan_minidump_log(soc->rxdma_mon_buf_ring[lmac_id].base_vaddr_unaligned, - soc->rxdma_mon_buf_ring[lmac_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_MON_BUF, - "rxdma_mon_buf_ring"); + if (!soc->wlan_cfg_ctx->rxdma1_enable) + continue; - entries = - wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx); - if (dp_srng_setup(soc, - &soc->rxdma_mon_dst_ring[lmac_id], - RXDMA_MONITOR_DST, 0, lmac_id, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_mon_dst_ring")); - return QDF_STATUS_E_NOMEM; - } - wlan_minidump_log(soc->rxdma_mon_dst_ring[lmac_id].base_vaddr_unaligned, - soc->rxdma_mon_dst_ring[lmac_id].alloc_size, 
- soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_MON_DST, - "rxdma_mon_dst"); + entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx); + if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id], + RXDMA_MONITOR_BUF, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_buf_ring ")); + goto fail1; + } - entries = - wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx); - if (dp_srng_setup(soc, - &soc->rxdma_mon_status_ring[lmac_id], - RXDMA_MONITOR_STATUS, 0, lmac_id, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_mon_status_ring")); - return QDF_STATUS_E_NOMEM; - } - wlan_minidump_log(soc->rxdma_mon_status_ring[lmac_id].base_vaddr_unaligned, - soc->rxdma_mon_status_ring[lmac_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_MON_STATUS, - "rxdma_mon_status"); + entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx); + if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id], + RXDMA_MONITOR_DST, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_dst_ring")); + goto fail1; + } - entries = - wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx); - if (dp_srng_setup(soc, - &soc->rxdma_mon_desc_ring[lmac_id], - RXDMA_MONITOR_DESC, 0, lmac_id, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_mon_desc_ring")); - return QDF_STATUS_E_NOMEM; - } - wlan_minidump_log(soc->rxdma_mon_desc_ring[lmac_id].base_vaddr_unaligned, - soc->rxdma_mon_desc_ring[lmac_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_MON_DESC, - "rxdma_mon_desc"); - - } else { - entries = - wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx); - if (dp_srng_setup(soc, - &soc->rxdma_mon_status_ring[lmac_id], - RXDMA_MONITOR_STATUS, 0, lmac_id, - entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_mon_status_ring")); - return QDF_STATUS_E_NOMEM; - } - 
wlan_minidump_log(soc->rxdma_mon_status_ring[lmac_id].base_vaddr_unaligned, - soc->rxdma_mon_status_ring[lmac_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_MON_STATUS, - "rxdma_mon_status_ring"); + entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx); + if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id], + RXDMA_MONITOR_DESC, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_mon_desc_ring")); + goto fail1; } } - return QDF_STATUS_SUCCESS; + +fail1: + dp_mon_rings_free(pdev); + return QDF_STATUS_E_NOMEM; } #else +static void dp_mon_rings_free(struct dp_pdev *pdev) +{ +} + +static void dp_mon_rings_deinit(struct dp_pdev *pdev) +{ +} + static -QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev) +QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev) +{ + return QDF_STATUS_SUCCESS; +} + +static +QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev) { return QDF_STATUS_SUCCESS; } @@ -3927,71 +3693,27 @@ static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc, qdf_device_t qdf_osdev, uint8_t pdev_id) { - int ring_size; - int entries; - struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; - int nss_cfg; - void *sojourn_buf; struct dp_soc *soc = (struct dp_soc *)txrx_soc; struct dp_pdev *pdev = NULL; - QDF_STATUS ret; - - if (dp_is_soc_reinit(soc)) { - pdev = soc->pdev_list[pdev_id]; - } else { - pdev = qdf_mem_malloc(sizeof(*pdev)); - wlan_minidump_log(pdev, sizeof(*pdev), - soc->ctrl_psoc, - WLAN_MD_DP_PDEV, - "dp_pdev"); - } + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + int nss_cfg; + pdev = qdf_mem_malloc(sizeof(*pdev)); if (!pdev) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("DP PDEV memory allocation failed")); - ret = QDF_STATUS_E_NOMEM; - goto fail0; - } - pdev->soc = soc; - pdev->pdev_id = pdev_id; - - pdev->filter = dp_mon_filter_alloc(pdev); - if (!pdev->filter) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - 
FL("Memory allocation failed for monitor filters")); - qdf_mem_free(pdev); - ret = QDF_STATUS_E_NOMEM; - goto fail0; - } - - /* - * Variable to prevent double pdev deinitialization during - * radio detach execution .i.e. in the absence of any vdev. - */ - pdev->pdev_deinit = 0; - pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer)); - - if (!pdev->invalid_peer) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("Invalid peer memory allocation failed")); - dp_mon_filter_dealloc(pdev); - qdf_mem_free(pdev); - ret = QDF_STATUS_E_NOMEM; + FL("DP PDEV memory allocation failed")); goto fail0; } + wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc, + WLAN_MD_DP_PDEV, "dp_pdev"); soc_cfg_ctx = soc->wlan_cfg_ctx; pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc); if (!pdev->wlan_cfg_ctx) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("pdev cfg_attach failed")); - - qdf_mem_free(pdev->invalid_peer); - dp_mon_filter_dealloc(pdev); - qdf_mem_free(pdev); - ret = QDF_STATUS_E_FAILURE; - goto fail0; + FL("pdev cfg_attach failed")); + goto fail1; } /* @@ -3999,283 +3721,65 @@ static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc, */ nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx); wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx, - (nss_cfg & (1 << pdev_id))); + (nss_cfg & (1 << pdev_id))); + pdev->soc = soc; + pdev->pdev_id = pdev_id; soc->pdev_list[pdev_id] = pdev; pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id); soc->pdev_count++; - pdev->target_pdev_id = - dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id); - - if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB && - pdev->lmac_id == PHYB_2G_LMAC_ID) { - pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID; - } - - TAILQ_INIT(&pdev->vdev_list); - qdf_spinlock_create(&pdev->vdev_list_lock); - pdev->vdev_count = 0; - - qdf_spinlock_create(&pdev->tx_mutex); - qdf_spinlock_create(&pdev->neighbour_peer_mutex); - 
TAILQ_INIT(&pdev->neighbour_peers_list); - pdev->neighbour_peers_added = false; - pdev->monitor_configured = false; - pdev->enable_reap_timer_non_pkt = false; - - if (dp_soc_cmn_setup(soc)) { + /* Allocate memory for pdev srng rings */ + if (dp_pdev_srng_alloc(pdev)) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_soc_cmn_setup failed")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - /* Setup per PDEV TCL rings if configured */ - if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { - ring_size = - wlan_cfg_tx_ring_size(soc_cfg_ctx); - - if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA, - pdev_id, pdev_id, ring_size, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for tcl_data_ring")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - wlan_minidump_log(soc->tcl_data_ring[pdev_id].base_vaddr_unaligned, - soc->tcl_data_ring[pdev_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_TCL_DATA, "tcl_data"); - - ring_size = - wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); - - if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id], - WBM2SW_RELEASE, pdev_id, pdev_id, - ring_size, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for tx_comp_ring")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - wlan_minidump_log(soc->tcl_data_ring[pdev_id].base_vaddr_unaligned, - soc->tcl_data_ring[pdev_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_TX_COMP, - "tcl_comp_ring"); - - soc->num_tcl_data_rings++; - } - - /* Tx specific init */ - if (dp_tx_pdev_attach(pdev)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_tx_pdev_attach failed")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx); - /* Setup per PDEV REO rings if configured */ - if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) { - if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST, - pdev_id, pdev_id, ring_size, 0)) { - 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed for reo_dest_ringn")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - wlan_minidump_log(soc->reo_dest_ring[pdev_id].base_vaddr_unaligned, - soc->reo_dest_ring[pdev_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_REO_DEST, - "reo_dest_ring"); - - soc->num_reo_dest_rings++; - } - - ring_size = - wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx); - - if (dp_srng_setup(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], - RXDMA_BUF, 0, pdev->lmac_id, ring_size, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_srng_setup failed rx refill ring")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - if (dp_rxdma_ring_setup(soc, pdev)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("RXDMA ring config failed")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - if (dp_mon_rings_setup(soc, pdev)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("MONITOR rings setup failed")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx); - if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) { - if (dp_srng_setup(soc, - &soc->rxdma_err_dst_ring[pdev->lmac_id], - RXDMA_DST, - 0, pdev->lmac_id, entries, 0)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL(RNG_ERR "rxdma_err_dst_ring")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - wlan_minidump_log(soc->rxdma_err_dst_ring[pdev->lmac_id].base_vaddr_unaligned, - soc->rxdma_err_dst_ring[pdev->lmac_id].alloc_size, - soc->ctrl_psoc, - WLAN_MD_DP_SRNG_RXDMA_ERR_DST, - "rxdma_err_dst_ring"); - - } - - if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) { - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - if (dp_ipa_ring_resource_setup(soc, pdev)) { - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - FL("dp_ipa_uc_attach 
failed")); - ret = QDF_STATUS_E_FAILURE; - goto fail1; + FL("dp_pdev_srng_alloc failed")); + goto fail2; } /* Rx specific init */ - if (dp_rx_pdev_attach(pdev)) { + if (dp_rx_pdev_desc_pool_alloc(pdev)) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, FL("dp_rx_pdev_attach failed")); - ret = QDF_STATUS_E_FAILURE; - goto fail2; + goto fail3; } - DP_STATS_INIT(pdev); - - /* Monitor filter init */ - pdev->mon_filter_mode = MON_FILTER_ALL; - pdev->fp_mgmt_filter = FILTER_MGMT_ALL; - pdev->fp_ctrl_filter = FILTER_CTRL_ALL; - pdev->fp_data_filter = FILTER_DATA_ALL; - pdev->mo_mgmt_filter = FILTER_MGMT_ALL; - pdev->mo_ctrl_filter = FILTER_CTRL_ALL; - pdev->mo_data_filter = FILTER_DATA_ALL; - - dp_local_peer_id_pool_init(pdev); - - dp_dscp_tid_map_setup(pdev); - dp_pcp_tid_map_setup(pdev); - /* Rx monitor mode specific init */ - if (dp_rx_pdev_mon_attach(pdev)) { + if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) { QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "dp_rx_pdev_mon_attach failed"); - ret = QDF_STATUS_E_FAILURE; - goto fail2; + "dp_rx_pdev_mon_attach failed"); + goto fail4; } - if (dp_wdi_event_attach(pdev)) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "dp_wdi_evet_attach failed"); - ret = QDF_STATUS_E_FAILURE; - goto wdi_attach_fail; - } - - /* set the reo destination during initialization */ - pdev->reo_dest = pdev->pdev_id + 1; - - /* - * initialize ppdu tlv list - */ - TAILQ_INIT(&pdev->ppdu_info_list); - pdev->tlv_count = 0; - pdev->list_depth = 0; - - qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats)); - - pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev, - sizeof(struct cdp_tx_sojourn_stats), 0, 4, - TRUE); - - if (pdev->sojourn_buf) { - sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf); - qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats)); - } - /* initlialize cal client timer */ - dp_cal_client_attach(&pdev->cal_client_ctx, - dp_pdev_to_cdp_pdev(pdev), - pdev->soc->osdev, - 
&dp_iterate_update_peer_list); - qdf_event_create(&pdev->fw_peer_stats_event); - - pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); - - dp_init_tso_stats(pdev); - - if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS) { - ret = QDF_STATUS_E_FAILURE; - goto fail1; - } - - dp_tx_ppdu_stats_attach(pdev); - return QDF_STATUS_SUCCESS; - -wdi_attach_fail: - /* - * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach - * and hence need not to be done here. - */ - dp_rx_pdev_mon_detach(pdev); - +fail4: + dp_rx_pdev_desc_pool_free(pdev); +fail3: + dp_pdev_srng_free(pdev); fail2: - dp_rx_pdev_detach(pdev); - dp_ipa_uc_detach(soc, pdev); - + wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); fail1: - soc->pdev_count--; - if (pdev->invalid_peer) - qdf_mem_free(pdev->invalid_peer); - - if (pdev->filter) - dp_mon_filter_dealloc(pdev); - - dp_pdev_detach((struct cdp_pdev *)pdev, 0); + qdf_mem_free(pdev); fail0: - return ret; + return QDF_STATUS_E_FAILURE; } /* -* dp_rxdma_ring_cleanup() - configure the RX DMA rings -* @soc: data path SoC handle -* @pdev: Physical device handle -* -* Return: void -*/ + * dp_rxdma_ring_cleanup() - configure the RX DMA rings + * @soc: data path SoC handle + * @pdev: Physical device handle + * + * Return: void + */ #ifdef QCA_HOST2FW_RXBUF_RING -static void dp_rxdma_ring_cleanup(struct dp_soc *soc, - struct dp_pdev *pdev) +static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev) { int i; - for (i = 0; i < MAX_RX_MAC_RINGS; i++) - dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i], - RXDMA_BUF, 1); + for (i = 0; i < MAX_RX_MAC_RINGS; i++) { + dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1); + dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]); + } if (soc->reap_timer_init) { qdf_timer_free(&soc->mon_reap_timer); @@ -4283,8 +3787,7 @@ static void dp_rxdma_ring_cleanup(struct dp_soc *soc, } } #else -static void dp_rxdma_ring_cleanup(struct dp_soc *soc, - struct dp_pdev *pdev) +static void 
dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev) { if (soc->lmac_timer_init) { qdf_timer_stop(&soc->lmac_reap_timer); @@ -4306,10 +3809,10 @@ static void dp_neighbour_peers_detach(struct dp_pdev *pdev) struct dp_neighbour_peer *temp_peer = NULL; TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list, - neighbour_peer_list_elem, temp_peer) { + neighbour_peer_list_elem, temp_peer) { /* delete this peer from the list */ TAILQ_REMOVE(&pdev->neighbour_peers_list, - peer, neighbour_peer_list_elem); + peer, neighbour_peer_list_elem); qdf_mem_free(peer); } @@ -4340,132 +3843,6 @@ static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev) } -/** - * dp_mon_ring_deinit() - Deinitialize monitor rings - * @soc: DP soc handle - * @pdev: DP pdev handle - * mac_id: Mac ID - * - * Return: void - */ -#if !defined(DISABLE_MON_CONFIG) -static -void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev, - int mac_id) -{ - if (soc->wlan_cfg_ctx->rxdma1_enable) { - dp_srng_deinit(soc, - &soc->rxdma_mon_buf_ring[mac_id], - RXDMA_MONITOR_BUF, 0); - - dp_srng_deinit(soc, - &soc->rxdma_mon_dst_ring[mac_id], - RXDMA_MONITOR_DST, 0); - - dp_srng_deinit(soc, - &soc->rxdma_mon_status_ring[mac_id], - RXDMA_MONITOR_STATUS, 0); - - dp_srng_deinit(soc, - &soc->rxdma_mon_desc_ring[mac_id], - RXDMA_MONITOR_DESC, 0); - } else { - dp_srng_deinit(soc, - &soc->rxdma_mon_status_ring[mac_id], - RXDMA_MONITOR_STATUS, 0); - } -} - -static -void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev, - int mac_id) -{ - if (soc->wlan_cfg_ctx->rxdma1_enable) { - wlan_minidump_remove(soc->rxdma_mon_buf_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - &soc->rxdma_mon_buf_ring[mac_id], - RXDMA_MONITOR_BUF, 0); - - wlan_minidump_remove(soc->rxdma_mon_dst_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - &soc->rxdma_mon_dst_ring[mac_id], - RXDMA_MONITOR_DST, 0); - - wlan_minidump_remove(soc->rxdma_mon_status_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - 
&soc->rxdma_mon_status_ring[mac_id], - RXDMA_MONITOR_STATUS, 0); - - wlan_minidump_remove(soc->rxdma_mon_desc_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - &soc->rxdma_mon_desc_ring[mac_id], - RXDMA_MONITOR_DESC, 0); - - wlan_minidump_remove(soc->rxdma_err_dst_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - &soc->rxdma_err_dst_ring[mac_id], - RXDMA_DST, 0); - } else { - wlan_minidump_remove(soc->rxdma_mon_status_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - &soc->rxdma_mon_status_ring[mac_id], - RXDMA_MONITOR_STATUS, 0); - - wlan_minidump_remove(soc->rxdma_err_dst_ring[mac_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, - &soc->rxdma_err_dst_ring[mac_id], - RXDMA_DST, 0); - } -} -#else -/** - * dp_mon_ring_deinit() - Deinitialize monitor rings - * @soc: DP soc handle - * @pdev: DP pdev handle - * mac_id: Mac ID - * - * Return: void - */ -static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev, - int mac_id) -{ -} - -/** - * dp_mon_ring_cleanup() - Cleanup all monitor rings - * @soc: DP soc handle - * @pdev: DP pdev handle - * mac_id: Mac ID - * - * Return: void - */ -static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev, - int mac_id) -{ -} -#endif - -/** - * dp_pdev_mem_reset() - Reset txrx pdev memory - * @pdev: dp pdev handle - * - * Return: None - */ -static void dp_pdev_mem_reset(struct dp_pdev *pdev) -{ - uint16_t len = 0; - uint8_t *dp_pdev_offset = (uint8_t *)pdev; - - len = sizeof(struct dp_pdev) - - offsetof(struct dp_pdev, pdev_deinit) - - sizeof(pdev->pdev_deinit); - dp_pdev_offset = dp_pdev_offset + - offsetof(struct dp_pdev, pdev_deinit) + - sizeof(pdev->pdev_deinit); - - qdf_mem_zero(dp_pdev_offset, len); -} - #ifdef WLAN_DP_PENDING_MEM_FLUSH /** * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev @@ -4516,66 +3893,43 @@ static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev) static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force) { 
struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; - struct dp_soc *soc = pdev->soc; qdf_nbuf_t curr_nbuf, next_nbuf; - int mac_id; - /* - * Prevent double pdev deinitialization during radio detach - * execution .i.e. in the absence of any vdev - */ if (pdev->pdev_deinit) return; - pdev->pdev_deinit = 1; - - dp_wdi_event_detach(pdev); + dp_tx_me_exit(pdev); + dp_rx_fst_detach(pdev->soc, pdev); + dp_rx_pdev_mon_buffers_free(pdev); + dp_rx_pdev_buffers_free(pdev); + dp_rx_pdev_mon_desc_pool_deinit(pdev); + dp_rx_pdev_desc_pool_deinit(pdev); + dp_htt_ppdu_stats_detach(pdev); + dp_tx_ppdu_stats_detach(pdev); + qdf_event_destroy(&pdev->fw_peer_stats_event); + dp_cal_client_detach(&pdev->cal_client_ctx); + if (pdev->sojourn_buf) + qdf_nbuf_free(pdev->sojourn_buf); dp_pdev_flush_pending_vdevs(pdev); - dp_tx_pdev_detach(pdev); - - if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { - dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id], - TCL_DATA, pdev->pdev_id); - dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id], - WBM2SW_RELEASE, pdev->pdev_id); - } - dp_pktlogmod_exit(pdev); - - dp_rx_fst_detach(soc, pdev); - dp_rx_pdev_buffers_free(pdev); - dp_rx_pdev_desc_pool_deinit(pdev); - dp_rx_pdev_mon_detach(pdev); dp_neighbour_peers_detach(pdev); + qdf_spinlock_destroy(&pdev->tx_mutex); qdf_spinlock_destroy(&pdev->vdev_list_lock); - dp_ipa_uc_detach(soc, pdev); + if (pdev->invalid_peer) + qdf_mem_free(pdev->invalid_peer); - dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev); + if (pdev->filter) + dp_mon_filter_dealloc(pdev); - /* Cleanup per PDEV REO rings if configured */ - if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { - dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id], - REO_DST, pdev->pdev_id); - } + dp_pdev_srng_deinit(pdev); - dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], - RXDMA_BUF, 0); - - dp_rxdma_ring_cleanup(soc, pdev); - - for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - int lmac_id = - dp_get_lmac_id_for_pdev_id(soc, 
mac_id, pdev->pdev_id); - - dp_mon_ring_deinit(soc, pdev, lmac_id); - - dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id], - RXDMA_DST, 0); - } + dp_ipa_uc_detach(pdev->soc, pdev); + dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev); + dp_rxdma_ring_cleanup(pdev->soc, pdev); curr_nbuf = pdev->invalid_peer_head_msdu; while (curr_nbuf) { @@ -4586,32 +3940,8 @@ static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force) pdev->invalid_peer_head_msdu = NULL; pdev->invalid_peer_tail_msdu = NULL; - dp_htt_ppdu_stats_detach(pdev); - - dp_tx_ppdu_stats_detach(pdev); - - qdf_nbuf_free(pdev->sojourn_buf); - - dp_cal_client_detach(&pdev->cal_client_ctx); - - soc->pdev_count--; - - /* only do soc common cleanup when last pdev do detach */ - if (!(soc->pdev_count)) - dp_reo_cmdlist_destroy(soc); - - wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); - if (pdev->invalid_peer) - qdf_mem_free(pdev->invalid_peer); - - /* - * Fee the monitor filter allocated and stored - */ - if (pdev->filter) - dp_mon_filter_dealloc(pdev); - - qdf_mem_free(pdev->dp_txrx_handle); - dp_pdev_mem_reset(pdev); + dp_wdi_event_detach(pdev); + pdev->pdev_deinit = 1; } /** @@ -4626,16 +3956,14 @@ static QDF_STATUS dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force) { - struct dp_soc *soc = (struct dp_soc *)psoc; - struct dp_pdev *txrx_pdev = - dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc, - pdev_id); + struct dp_pdev *txrx_pdev; + + txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc, + pdev_id); if (!txrx_pdev) return QDF_STATUS_E_FAILURE; - soc->dp_soc_reinit = TRUE; - dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force); return QDF_STATUS_SUCCESS; @@ -4652,59 +3980,15 @@ static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force) { struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev; struct dp_soc *soc = pdev->soc; - struct rx_desc_pool *rx_desc_pool; - int mac_id; - int lmac_id; - if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { - 
wlan_minidump_remove( - soc->tcl_data_ring[pdev->pdev_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id], - TCL_DATA, pdev->pdev_id); - wlan_minidump_remove( - soc->tx_comp_ring[pdev->pdev_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id], - WBM2SW_RELEASE, pdev->pdev_id); - } - - dp_mon_link_free(pdev); - - dp_rx_pdev_detach(pdev); - /* Cleanup per PDEV REO rings if configured */ - if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { - wlan_minidump_remove( - soc->reo_dest_ring[pdev->pdev_id].base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id], - REO_DST, pdev->pdev_id); - } - dp_rxdma_ring_cleanup(soc, pdev); - wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); - - dp_srng_cleanup(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], - RXDMA_BUF, 0); - dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev); - - for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - lmac_id = - dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); - dp_mon_ring_cleanup(soc, pdev, lmac_id); - - dp_srng_cleanup(soc, &soc->rxdma_err_dst_ring[lmac_id], - RXDMA_DST, 0); - - if (dp_is_soc_reinit(soc)) { - rx_desc_pool = &soc->rx_desc_status[lmac_id]; - dp_rx_desc_pool_free(soc, rx_desc_pool); - rx_desc_pool = &soc->rx_desc_mon[lmac_id]; - dp_rx_desc_pool_free(soc, rx_desc_pool); - } - } - - /* only do soc common cleanup when last pdev do detach */ - if (!(soc->pdev_count)) - dp_soc_cmn_cleanup(soc); + dp_rx_pdev_mon_desc_pool_free(pdev); + dp_rx_pdev_desc_pool_free(pdev); + dp_pdev_srng_free(pdev); + soc->pdev_count--; soc->pdev_list[pdev->pdev_id] = NULL; + + wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx); wlan_minidump_remove(pdev); qdf_mem_free(pdev); } @@ -4720,23 +4004,18 @@ static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force) static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force) { - struct dp_soc *soc = (struct dp_soc *)psoc; - struct dp_pdev *txrx_pdev = - 
dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc, - pdev_id); + struct dp_pdev *pdev; - if (!txrx_pdev) { - dp_err("Couldn't find dp pdev"); + pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc, + pdev_id); + + if (!pdev) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("DP PDEV is Null for pdev id %d"), pdev_id); return QDF_STATUS_E_FAILURE; } - if (dp_is_soc_reinit(soc)) { - dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force); - } else { - dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force); - dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force); - } - + dp_pdev_detach((struct cdp_pdev *)pdev, force); return QDF_STATUS_SUCCESS; } @@ -4765,25 +4044,17 @@ static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc) qdf_spinlock_destroy(&soc->reo_desc_freelist_lock); } -/** - * dp_soc_mem_reset() - Reset Dp Soc memory - * @soc: DP handle +/* + * dp_soc_reset_txrx_ring_map() - reset tx ring map + * @soc: DP SOC handle * - * Return: None */ -static void dp_soc_mem_reset(struct dp_soc *soc) +static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc) { - uint16_t len = 0; - uint8_t *dp_soc_offset = (uint8_t *)soc; + uint32_t i; - len = sizeof(struct dp_soc) - - offsetof(struct dp_soc, dp_soc_reinit) - - sizeof(soc->dp_soc_reinit); - dp_soc_offset = dp_soc_offset + - offsetof(struct dp_soc, dp_soc_reinit) + - sizeof(soc->dp_soc_reinit); - - qdf_mem_zero(dp_soc_offset, len); + for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) + soc->tx_ring_map[i] = 0; } /** @@ -4795,92 +4066,50 @@ static void dp_soc_mem_reset(struct dp_soc *soc) static void dp_soc_deinit(void *txrx_soc) { struct dp_soc *soc = (struct dp_soc *)txrx_soc; - int i; + struct htt_soc *htt_soc = soc->htt_handle; qdf_atomic_set(&soc->cmn_init_done, 0); - for (i = 0; i < MAX_PDEV_CNT; i++) { - if (soc->pdev_list[i]) - dp_pdev_deinit((struct cdp_pdev *) - soc->pdev_list[i], 1); + /* free peer tables & AST tables allocated during peer_map_attach */ + if 
(soc->peer_map_attach_success) { + dp_peer_find_detach(soc); + soc->peer_map_attach_success = FALSE; } qdf_flush_work(&soc->htt_stats.work); qdf_disable_work(&soc->htt_stats.work); - /* Free pending htt stats messages */ - qdf_nbuf_queue_free(&soc->htt_stats.msg); - - dp_peer_find_detach(soc); - - /* de-initialize srng rings */ - /* Common rings */ - dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0); - - dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0); - - /* Tx data rings */ - if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { - for (i = 0; i < soc->num_tcl_data_rings; i++) { - dp_tx_deinit_pair_by_index(soc, i); - } - if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) - dp_tx_deinit_pair_by_index(soc, IPA_TCL_DATA_RING_IDX); - } - - /* TCL command/credit ring */ - dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0); - /* TCL status ring */ - dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0); - - /* Rx data rings */ - if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { - soc->num_reo_dest_rings = - wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); - for (i = 0; i < soc->num_reo_dest_rings; i++) { - /* TODO: Get number of rings and ring sizes - * from wlan_cfg - */ - dp_srng_deinit(soc, &soc->reo_dest_ring[i], - REO_DST, i); - } - } - /* REO reinjection ring */ - dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0); - - /* Rx release ring */ - dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0); - - /* Rx exception ring */ - /* TODO: Better to store ring_type and ring_num in - * dp_srng during setup - */ - dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0); - - /* REO command and status rings */ - dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0); - dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0); - - dp_soc_wds_detach(soc); - - qdf_spinlock_destroy(&soc->peer_ref_mutex); qdf_spinlock_destroy(&soc->htt_stats.lock); - htt_soc_htc_dealloc(soc->htt_handle); + 
dp_soc_reset_txrx_ring_map(soc); dp_reo_desc_freelist_destroy(soc); - qdf_spinlock_destroy(&soc->ast_lock); - DEINIT_RX_HW_STATS_LOCK(soc); - dp_soc_mem_reset(soc); -} + qdf_spinlock_destroy(&soc->ast_lock); + qdf_spinlock_destroy(&soc->peer_ref_mutex); -void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index) -{ - dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index); - dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index); + qdf_nbuf_queue_free(&soc->htt_stats.msg); + + dp_soc_wds_detach(soc); + + qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock); + + dp_reo_cmdlist_destroy(soc); + qdf_spinlock_destroy(&soc->rx.reo_cmd_lock); + + dp_soc_tx_desc_sw_pools_deinit(soc); + + dp_soc_srng_deinit(soc); + + dp_hw_link_desc_ring_deinit(soc); + + htt_soc_htc_dealloc(soc->htt_handle); + + htt_soc_detach(htt_soc); + + wlan_minidump_remove(soc); } /** @@ -4891,9 +4120,6 @@ void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index) */ static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc) { - struct dp_soc *soc = (struct dp_soc *)txrx_soc; - - soc->dp_soc_reinit = 1; dp_soc_deinit(txrx_soc); } @@ -4906,96 +4132,13 @@ static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc) static void dp_soc_detach(struct cdp_soc_t *txrx_soc) { struct dp_soc *soc = (struct dp_soc *)txrx_soc; - int i; - - qdf_atomic_set(&soc->cmn_init_done, 0); - - /* TBD: Call Tx and Rx cleanup functions to free buffers and - * SW descriptors - */ - - for (i = 0; i < MAX_PDEV_CNT; i++) { - if (soc->pdev_list[i]) - dp_pdev_detach((struct cdp_pdev *) - soc->pdev_list[i], 1); - } - - /* Free the ring memories */ - /* Common rings */ - wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0); - - if (dp_is_soc_reinit(soc)) { - dp_tx_soc_detach(soc); - } - - /* Tx data rings */ - if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { - for (i = 0; i < soc->num_tcl_data_rings; i++) { - 
wlan_minidump_remove(soc->tcl_data_ring[i].base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->tcl_data_ring[i], - TCL_DATA, i); - wlan_minidump_remove(soc->tx_comp_ring[i].base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->tx_comp_ring[i], - WBM2SW_RELEASE, i); - } - if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { - dp_srng_cleanup(soc, &soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX], - TCL_DATA, IPA_TCL_DATA_RING_IDX); - dp_srng_cleanup(soc, &soc->tx_comp_ring[IPA_TCL_DATA_RING_IDX], - WBM2SW_RELEASE, IPA_TCL_DATA_RING_IDX); - } - } - - /* TCL command/credit ring */ - wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0); - /* TCL status rings */ - wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0); - - /* Rx data rings */ - if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) { - soc->num_reo_dest_rings = - wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); - for (i = 0; i < soc->num_reo_dest_rings; i++) { - /* TODO: Get number of rings and ring sizes - * from wlan_cfg - */ - wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->reo_dest_ring[i], - REO_DST, i); - } - } - /* REO reinjection ring */ - wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0); - - /* Rx release ring */ - wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0); - dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3); - - /* Rx exception ring */ - /* TODO: Better to store ring_type and ring_num in - * dp_srng during setup - */ - wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0); - - /* REO command and status rings */ - 
wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0); - wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned); - dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0); - dp_hw_link_desc_pool_cleanup(soc, WLAN_INVALID_PDEV_ID); - - htt_soc_detach(soc->htt_handle); - soc->dp_soc_reinit = 0; + dp_soc_tx_desc_sw_pools_free(soc); + dp_soc_srng_free(soc); + dp_hw_link_desc_ring_free(soc); + dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID); wlan_cfg_soc_detach(soc->wlan_cfg_ctx); - wlan_minidump_remove(soc); qdf_mem_free(soc); } @@ -5007,14 +4150,7 @@ static void dp_soc_detach(struct cdp_soc_t *txrx_soc) */ static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc) { - struct dp_soc *soc = (struct dp_soc *)txrx_soc; - - if (dp_is_soc_reinit(soc)) { - dp_soc_detach(txrx_soc); - } else { - dp_soc_deinit(txrx_soc); - dp_soc_detach(txrx_soc); - } + dp_soc_detach(txrx_soc); } #if !defined(DISABLE_MON_CONFIG) @@ -5481,12 +4617,6 @@ dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc) /* initialize work queue for stats processing */ qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc); - wlan_minidump_log(soc, - sizeof(*soc), - soc->ctrl_psoc, - WLAN_MD_DP_SOC, - "dp_soc"); - return QDF_STATUS_SUCCESS; } @@ -5545,17 +4675,16 @@ static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc, qdf_mem_free(vdev); goto fail0; } - wlan_minidump_log(vdev, - sizeof(*vdev), - soc->ctrl_psoc, - WLAN_MD_DP_VDEV, - "dp_vdev"); + if (!vdev) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, FL("DP VDEV memory allocation failed")); goto fail0; } + wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc, + WLAN_MD_DP_VDEV, "dp_vdev"); + vdev->pdev = pdev; vdev->vdev_id = vdev_id; vdev->opmode = op_mode; @@ -5888,6 +5017,8 @@ static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc, qdf_spin_unlock_bh(&pdev->vdev_list_lock); dp_tx_vdev_detach(vdev); + wlan_minidump_remove(vdev); 
+ free_vdev: if (wlan_op_mode_monitor == vdev->opmode) { if (soc->intr_mode == DP_INTR_POLL) @@ -7308,6 +6439,8 @@ static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t special_monitor) { + uint32_t mac_id; + uint32_t mac_for_pdev; struct dp_pdev *pdev; struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc, @@ -7341,7 +6474,13 @@ static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc, } pdev->monitor_configured = true; - dp_mon_buf_delayed_replenish(pdev); + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { + mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, + pdev->pdev_id); + dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, + FALSE); + } dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE); @@ -10210,6 +9349,7 @@ static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t *soc_hdl, return QDF_STATUS_E_FAILURE; soc->is_peer_map_unmap_v2 = peer_map_unmap_v2; + soc->peer_map_attach_success = TRUE; return QDF_STATUS_SUCCESS; } @@ -10511,6 +9651,7 @@ static struct cdp_cmn_ops dp_ops_cmn = { .txrx_soc_init = dp_soc_init_wifi3, .txrx_tso_soc_attach = dp_tso_soc_attach, .txrx_tso_soc_detach = dp_tso_soc_detach, + .txrx_pdev_init = dp_pdev_init_wifi3, .txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3, .txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3, .txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3, @@ -11336,40 +10477,6 @@ void dp_soc_set_txrx_ring_map(struct dp_soc *soc) #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \ defined(QCA_WIFI_QCA5018) - -#ifndef QCA_MEM_ATTACH_ON_WIFI3 - -/** - * dp_soc_attach_wifi3() - Attach txrx SOC - * @ctrl_psoc: Opaque SOC handle from control plane - * @htc_handle: Opaque HTC handle - * @hif_handle: Opaque HIF handle - * @qdf_osdev: QDF device - * @ol_ops: Offload Operations - * @device_id: Device ID - * - * Return: DP SOC handle on success, NULL on failure - */ -struct cdp_soc_t * 
-dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, - struct hif_opaque_softc *hif_handle, - HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, - struct ol_if_ops *ol_ops, uint16_t device_id) -{ - struct dp_soc *dp_soc = NULL; - - dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev, - ol_ops, device_id); - if (!dp_soc) - return NULL; - - if (!dp_soc_init(dp_soc, htc_handle, hif_handle)) - return NULL; - - return dp_soc_to_cdp_soc_t(dp_soc); -} -#else - /** * dp_soc_attach_wifi3() - Attach txrx SOC * @ctrl_psoc: Opaque SOC handle from control plane @@ -11389,13 +10496,11 @@ dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, { struct dp_soc *dp_soc = NULL; - dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev, + dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev, ol_ops, device_id); return dp_soc_to_cdp_soc_t(dp_soc); } -#endif - static inline void dp_soc_set_def_pdev(struct dp_soc *soc) { int lmac_id; @@ -11410,6 +10515,7 @@ static inline void dp_soc_set_def_pdev(struct dp_soc *soc) /** * dp_soc_attach() - Attach txrx SOC * @ctrl_psoc: Opaque SOC handle from control plane + * @hif_handle: Opaque HIF handle * @htc_handle: Opaque HTC handle * @qdf_osdev: QDF device * @ol_ops: Offload Operations @@ -11419,20 +10525,29 @@ static inline void dp_soc_set_def_pdev(struct dp_soc *soc) */ static struct dp_soc * dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, - HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, - struct ol_if_ops *ol_ops, uint16_t device_id) + struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops, + uint16_t device_id) { int int_ctx; struct dp_soc *soc = NULL; - struct htt_soc *htt_soc; + + if (!hif_handle) { + dp_err("HIF handle is NULL"); + goto fail0; + } soc = qdf_mem_malloc(sizeof(*soc)); - if (!soc) { dp_err("DP SOC memory allocation failed"); goto fail0; } + soc->hif_handle = hif_handle; + soc->hal_soc = hif_get_hal_handle(soc->hif_handle); + if 
(!soc->hal_soc) + goto fail1; + int_ctx = 0; soc->device_id = device_id; soc->cdp_soc.ops = &dp_txrx_ops; @@ -11442,31 +10557,46 @@ dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS; wlan_set_srng_cfg(&soc->wlan_srng_cfg); - qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map)); - soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc); if (!soc->wlan_cfg_ctx) { dp_err("wlan_cfg_ctx failed\n"); goto fail1; } - dp_soc_set_interrupt_mode(soc); + dp_soc_cfg_attach(soc); - htt_soc = htt_soc_attach(soc, htc_handle); - - if (!htt_soc) - goto fail1; - - soc->htt_handle = htt_soc; - - if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS) + if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) { + dp_err("failed to allocate link desc pool banks"); goto fail2; + } + if (dp_hw_link_desc_ring_alloc(soc)) { + dp_err("failed to allocate link_desc_ring"); + goto fail3; + } + + if (dp_soc_srng_alloc(soc)) { + dp_err("failed to allocate soc srng rings"); + goto fail4; + } + + if (dp_soc_tx_desc_sw_pools_alloc(soc)) { + dp_err("dp_soc_tx_desc_sw_pools_alloc failed"); + goto fail5; + } + + dp_soc_set_interrupt_mode(soc); dp_soc_set_def_pdev(soc); return soc; +fail5: + dp_soc_srng_free(soc); +fail4: + dp_hw_link_desc_ring_free(soc); +fail3: + dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID); fail2: - htt_soc_detach(htt_soc); + wlan_cfg_soc_detach(soc->wlan_cfg_ctx); fail1: qdf_mem_free(soc); fail0: @@ -11484,100 +10614,36 @@ fail0: void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, struct hif_opaque_softc *hif_handle) { - int target_type; struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle; bool is_monitor_mode = false; + struct hal_reo_params reo_params; + uint8_t i; + + wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc, + WLAN_MD_DP_SOC, "dp_soc"); + + htt_soc = htt_soc_attach(soc, htc_handle); + if (!htt_soc) + goto fail0; + + soc->htt_handle = htt_soc; + + if 
(htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS) + goto fail1; htt_set_htc_handle(htt_soc, htc_handle); soc->hif_handle = hif_handle; soc->hal_soc = hif_get_hal_handle(soc->hif_handle); if (!soc->hal_soc) - return NULL; + goto fail2; - htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, - htt_get_htc_handle(htt_soc), - soc->hal_soc, soc->osdev); - target_type = hal_get_target_type(soc->hal_soc); - switch (target_type) { - case TARGET_TYPE_QCA6290: - wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, - REO_DST_RING_SIZE_QCA6290); - soc->ast_override_support = 1; - soc->da_war_enabled = false; - break; -#if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \ - defined(QCA_WIFI_QCA6750) - case TARGET_TYPE_QCA6390: - case TARGET_TYPE_QCA6490: - case TARGET_TYPE_QCA6750: - wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, - REO_DST_RING_SIZE_QCA6290); - wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true); - soc->ast_override_support = 1; - if (soc->cdp_soc.ol_ops->get_con_mode && - soc->cdp_soc.ol_ops->get_con_mode() == - QDF_GLOBAL_MONITOR_MODE) { - int int_ctx; - - for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) { - soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0; - soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0; - } - } - soc->wlan_cfg_ctx->rxdma1_enable = 0; - break; -#endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */ - - case TARGET_TYPE_QCA8074: - wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, - REO_DST_RING_SIZE_QCA8074); - wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true); - soc->da_war_enabled = true; - soc->is_rx_fse_full_cache_invalidate_war_enabled = true; - break; - case TARGET_TYPE_QCA8074V2: - case TARGET_TYPE_QCA6018: - wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, - REO_DST_RING_SIZE_QCA8074); - wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); - soc->hw_nac_monitor_support = 1; - soc->ast_override_support = 1; - soc->per_tid_basize_max_tid = 8; - soc->num_hw_dscp_tid_map = 
HAL_MAX_HW_DSCP_TID_V2_MAPS; - soc->da_war_enabled = false; - soc->is_rx_fse_full_cache_invalidate_war_enabled = true; - break; - case TARGET_TYPE_QCN9000: - wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, - REO_DST_RING_SIZE_QCN9000); - soc->ast_override_support = 1; - soc->da_war_enabled = false; - wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); - soc->hw_nac_monitor_support = 1; - soc->per_tid_basize_max_tid = 8; - soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; - soc->lmac_polled_mode = 0; - soc->wbm_release_desc_rx_sg_support = 1; - if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) - soc->full_mon_mode = true; - break; - case TARGET_TYPE_QCA5018: - wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, - REO_DST_RING_SIZE_QCA8074); - soc->ast_override_support = 1; - soc->da_war_enabled = false; - wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); - soc->hw_nac_monitor_support = 1; - soc->per_tid_basize_max_tid = 8; - soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; - break; - default: - qdf_print("%s: Unknown tgt type %d\n", __func__, target_type); - qdf_assert_always(0); - break; - } + dp_soc_cfg_init(soc); + /* Note: Any SRNG ring initialization should happen only after + * Interrupt mode is set and followed by filling up the + * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER. 
+ */ dp_soc_set_interrupt_mode(soc); if (soc->cdp_soc.ol_ops->get_con_mode && soc->cdp_soc.ol_ops->get_con_mode() == @@ -11586,10 +10652,44 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode, is_monitor_mode); + + /* initialize WBM_IDLE_LINK ring */ + if (dp_hw_link_desc_ring_init(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_hw_link_desc_ring_init failed")); + goto fail3; + } + + dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID); + + if (dp_soc_srng_init(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_soc_srng_init failed")); + goto fail4; + } + + if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, + htt_get_htc_handle(htt_soc), + soc->hal_soc, soc->osdev) == NULL) + goto fail5; + + /* Initialize descriptors in TCL Rings */ + for (i = 0; i < soc->num_tcl_data_rings; i++) { + hal_tx_init_data_ring(soc->hal_soc, + soc->tcl_data_ring[i].hal_srng); + } + + if (dp_soc_tx_desc_sw_pools_init(soc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_tx_soc_attach failed")); + goto fail6; + } + wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx, cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH)); soc->cce_disable = false; + qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map)); qdf_atomic_init(&soc->num_tx_outstanding); soc->num_tx_allowed = wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx); @@ -11598,9 +10698,8 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, CDP_CFG_MAX_PEER_ID); - if (ret != -EINVAL) { + if (ret != -EINVAL) wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret); - } ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc, CDP_CFG_CCE_DISABLE); @@ -11608,6 +10707,53 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, soc->cce_disable = true; } + /* + * Skip registering hw ring interrupts for WMAC2 on IPQ6018 + * WMAC2 is not there in 
IPQ6018 platform. + */ + if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) + dp_soc_disable_mac2_intr_mask(soc); + + /* Setup HW REO */ + qdf_mem_zero(&reo_params, sizeof(reo_params)); + + if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) { + /* + * Reo ring remap is not required if both radios + * are offloaded to NSS + */ + if (dp_reo_remap_config(soc, + &reo_params.remap1, + &reo_params.remap2)) + reo_params.rx_hash_enabled = true; + else + reo_params.rx_hash_enabled = false; + } + + /* setup the global rx defrag waitlist */ + TAILQ_INIT(&soc->rx.defrag.waitlist); + soc->rx.defrag.timeout_ms = + wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx); + soc->rx.defrag.next_flush_ms = 0; + soc->rx.flags.defrag_timeout_check = + wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx); + qdf_spinlock_create(&soc->rx.defrag.defrag_lock); + + /* + * set the fragment destination ring + */ + dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring); + + hal_reo_setup(soc->hal_soc, &reo_params); + + hal_reo_set_err_dst_remap(soc->hal_soc); + + qdf_atomic_set(&soc->cmn_init_done, 1); + + dp_soc_wds_attach(soc); + + qdf_nbuf_queue_init(&soc->htt_stats.msg); + qdf_spinlock_create(&soc->peer_ref_mutex); qdf_spinlock_create(&soc->ast_lock); @@ -11623,7 +10769,20 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle, qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc); return soc; - +fail6: + htt_soc_htc_dealloc(soc->htt_handle); +fail5: + dp_soc_srng_deinit(soc); +fail4: + dp_hw_link_desc_ring_deinit(soc); +fail3: + dp_hw_link_desc_ring_free(soc); +fail2: + htt_htc_pkt_pool_free(htt_soc); +fail1: + htt_soc_detach(htt_soc); +fail0: + return NULL; } /** @@ -11907,17 +11066,6 @@ static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev) return pdev->enable_reap_timer_non_pkt; } -/* -* dp_is_soc_reinit() - Check if soc reinit is true -* @soc: DP SoC context -* -* Return: true or false -*/ -bool dp_is_soc_reinit(struct dp_soc *soc) -{ - return 
soc->dp_soc_reinit; -} - /* * dp_set_pktlog_wifi3() - attach txrx vdev * @pdev: Datapath PDEV handle @@ -12287,3 +11435,928 @@ uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex); return new_mac_cnt; } + +/** + * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including + * monitor rings + * @pdev: Datapath pdev handle + * + */ +static void dp_pdev_srng_deinit(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + uint8_t i; + + dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF, + pdev->lmac_id); + + if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX); + + for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); + + wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id], + RXDMA_DST, lmac_id); + } + + dp_mon_rings_deinit(pdev); +} + +/** + * dp_pdev_srng_init() - initialize all pdev srng rings including + * monitor rings + * @pdev: Datapath pdev handle + * + * return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_NOMEM on failure + */ +static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + uint32_t i; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], + RXDMA_BUF, 0, pdev->lmac_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed rx refill ring")); + goto fail1; + } + + if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { + if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX)) + goto fail1; + } + + if (dp_mon_rings_init(soc, pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("MONITOR rings setup failed")); + goto fail1; + } + + /* LMAC RxDMA to SW Rings configuration */ + if 
(!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) + /* Only valid for MCL */ + pdev = soc->pdev_list[0]; + + for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); + struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id]; + + if (srng->hal_srng) + continue; + + if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_err_dst_ring")); + goto fail1; + } + wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned, + soc->rxdma_err_dst_ring[lmac_id].alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_RXDMA_ERR_DST, + "rxdma_err_dst"); + } + return QDF_STATUS_SUCCESS; + +fail1: + dp_pdev_srng_deinit(pdev); + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_pdev_srng_free() - free all pdev srng rings including monitor rings + * pdev: Datapath pdev handle + * + */ +static void dp_pdev_srng_free(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + uint8_t i; + + dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]); + dp_mon_rings_free(pdev); + + if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX); + + for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); + + dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]); + } +} + +/** + * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including + * monitor rings + * pdev: Datapath pdev handle + * + * return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_NOMEM on failure + */ +static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev) +{ + struct dp_soc *soc = pdev->soc; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + uint32_t ring_size; + uint32_t i; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], + RXDMA_BUF, ring_size, 0)) { + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed rx refill ring")); + goto fail1; + } + + if (dp_mon_rings_alloc(soc, pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("MONITOR rings setup failed")); + goto fail1; + } + + if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) { + if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX)) + goto fail1; + } + + ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx); + /* LMAC RxDMA to SW Rings configuration */ + if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) + /* Only valid for MCL */ + pdev = soc->pdev_list[0]; + + for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) { + int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id); + struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id]; + + if (srng->base_vaddr_unaligned) + continue; + + if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + FL(RNG_ERR "rxdma_err_dst_ring")); + goto fail1; + } + } + + return QDF_STATUS_SUCCESS; +fail1: + dp_pdev_srng_free(pdev); + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_soc_srng_deinit() - de-initialize soc srng rings + * @soc: Datapath soc handle + * + */ +static void dp_soc_srng_deinit(struct dp_soc *soc) +{ + uint32_t i; + /* Free the ring memories */ + /* Common rings */ + wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0); + + /* Tx data rings */ + for (i = 0; i < soc->num_tcl_data_rings; i++) + dp_deinit_tx_pair_by_index(soc, i); + + /* TCL command and status rings */ + wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0); + wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0); + + /* Rx data rings */ + soc->num_reo_dest_rings = + 
wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx); + for (i = 0; i < soc->num_reo_dest_rings; i++) { + /* TODO: Get number of rings and ring sizes + * from wlan_cfg + */ + wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i); + } + + /* REO reinjection ring */ + wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0); + + /* Rx release ring */ + wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0); + + /* Rx exception ring */ + /* TODO: Better to store ring_type and ring_num in + * dp_srng during setup + */ + wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0); + + /* REO command and status rings */ + wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0); + wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned); + dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0); +} + +/** + * dp_soc_srng_init() - Initialize soc level srng rings + * @soc: Datapath soc handle + * + * return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_FAILURE on failure + */ +static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc) +{ + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + uint32_t num_tcl_data_rings, num_reo_dest_rings; + uint8_t i; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + dp_enable_verbose_debug(soc); + + /* WBM descriptor release ring */ + if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for wbm_desc_rel_ring")); + goto fail1; + } + + wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned, + soc->wbm_desc_rel_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_WBM_DESC_REL, + "wbm_desc_rel_ring"); + + /* TCL 
command and status rings */ + if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring, + TCL_CMD_CREDIT, 0, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for tcl_cmd_ring")); + goto fail1; + } + + wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned, + soc->tcl_cmd_credit_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_TCL_CMD, + "wbm_desc_rel_ring"); + + if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for tcl_status_ring")); + goto fail1; + } + + wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned, + soc->tcl_status_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_TCL_STATUS, + "wbm_desc_rel_ring"); + + /* REO reinjection ring */ + if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for reo_reinject_ring")); + goto fail1; + } + + wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned, + soc->reo_reinject_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_REO_REINJECT, + "reo_reinject_ring"); + + /* Rx release ring */ + if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for rx_rel_ring")); + goto fail1; + } + + wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned, + soc->rx_rel_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_RX_REL, + "reo_release_ring"); + + /* Rx exception ring */ + if (dp_srng_init(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0, + MAX_REO_DEST_RINGS)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for reo_exception_ring")); + goto fail1; + } + + wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned, + soc->reo_exception_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_REO_EXCEPTION, + "reo_exception_ring"); + + /* REO command and status 
rings */ + if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for reo_cmd_ring")); + goto fail1; + } + + wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned, + soc->reo_cmd_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_REO_CMD, + "reo_cmd_ring"); + + hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng); + TAILQ_INIT(&soc->rx.reo_cmd_list); + qdf_spinlock_create(&soc->rx.reo_cmd_lock); + + if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for reo_status_ring")); + goto fail1; + } + + wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned, + soc->reo_status_ring.alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_REO_STATUS, + "reo_status_ring"); + + num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx); + num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx); + + for (i = 0; i < num_tcl_data_rings; i++) { + if (dp_init_tx_ring_pair_by_index(soc, i)) + goto fail1; + } + + dp_create_ext_stats_event(soc); + + for (i = 0; i < num_reo_dest_rings; i++) { + /* Initialize REO destination ring */ + if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_init failed for reo_dest_ringn")); + goto fail1; + } + + wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned, + soc->reo_dest_ring[i].alloc_size, + soc->ctrl_psoc, + WLAN_MD_DP_SRNG_REO_DEST, + "reo_dest_ring"); + } + + return QDF_STATUS_SUCCESS; +fail1: + /* + * Cleanup will be done as part of soc_detach, which will + * be called on pdev attach failure + */ + dp_soc_srng_deinit(soc); + return QDF_STATUS_E_FAILURE; +} + +/** + * dp_soc_srng_free() - free soc level srng rings + * @soc: Datapath soc handle + * + */ +static void dp_soc_srng_free(struct dp_soc *soc) +{ + uint32_t i; + + dp_srng_free(soc, 
&soc->wbm_desc_rel_ring); + + for (i = 0; i < soc->num_tcl_data_rings; i++) + dp_free_tx_ring_pair_by_index(soc, i); + + dp_srng_free(soc, &soc->tcl_cmd_credit_ring); + dp_srng_free(soc, &soc->tcl_status_ring); + + for (i = 0; i < soc->num_reo_dest_rings; i++) + dp_srng_free(soc, &soc->reo_dest_ring[i]); + + dp_srng_free(soc, &soc->reo_reinject_ring); + dp_srng_free(soc, &soc->rx_rel_ring); + dp_srng_free(soc, &soc->reo_exception_ring); + dp_srng_free(soc, &soc->reo_cmd_ring); + dp_srng_free(soc, &soc->reo_status_ring); +} + +/** + * dp_soc_srng_alloc() - Allocate memory for soc level srng rings + * @soc: Datapath soc handle + * + * return: QDF_STATUS_SUCCESS on success + * QDF_STATUS_E_NOMEM on failure + */ +static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc) +{ + uint32_t entries; + uint32_t i; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + uint32_t num_tcl_data_rings, num_reo_dest_rings; + uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC; + uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + + /* sw2wbm link descriptor release ring */ + entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for wbm_desc_rel_ring")); + goto fail1; + } + + entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx); + /* TCL command and status rings */ + if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tcl_cmd_ring")); + goto fail1; + } + + entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries, + 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for tcl_status_ring")); + goto fail1; + } + + /* REO reinjection ring */ + 
entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_reinject_ring")); + goto fail1; + } + + /* Rx release ring */ + entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for rx_rel_ring")); + goto fail1; + } + + /* Rx exception ring */ + entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_exception_ring")); + goto fail1; + } + + /* REO command and status rings */ + entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_cmd_ring")); + goto fail1; + } + + entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx); + if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS, + entries, 0)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_status_ring")); + goto fail1; + } + + num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx); + num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx); + tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx); + tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx); + reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx); + + /* Disable cached desc if NSS offload is enabled */ + if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) + cached = 0; + + for (i = 0; i < num_tcl_data_rings; i++) { + if (dp_alloc_tx_ring_pair_by_index(soc, i)) + goto fail1; + } + + soc->num_tcl_data_rings = 
num_tcl_data_rings; + + for (i = 0; i < num_reo_dest_rings; i++) { + /* Setup REO destination ring */ + if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST, + reo_dst_ring_size, cached)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("dp_srng_setup failed for reo_dest_ring")); + goto fail1; + } + } + soc->num_reo_dest_rings = num_reo_dest_rings; + + return QDF_STATUS_SUCCESS; + +fail1: + dp_soc_srng_free(soc); + return QDF_STATUS_E_NOMEM; +} + +/** + * dp_soc_cfg_init() - initialize target specific configuration + * during dp_soc_init + * @soc: dp soc handle + */ +static void dp_soc_cfg_init(struct dp_soc *soc) +{ + int target_type; + + target_type = hal_get_target_type(soc->hal_soc); + switch (target_type) { + case TARGET_TYPE_QCA6290: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA6290); + soc->ast_override_support = 1; + soc->da_war_enabled = false; + break; + case TARGET_TYPE_QCA6390: + case TARGET_TYPE_QCA6490: + case TARGET_TYPE_QCA6750: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA6290); + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true); + soc->ast_override_support = 1; + if (soc->cdp_soc.ol_ops->get_con_mode && + soc->cdp_soc.ol_ops->get_con_mode() == + QDF_GLOBAL_MONITOR_MODE) { + int int_ctx; + + for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) { + soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0; + soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0; + } + } + soc->wlan_cfg_ctx->rxdma1_enable = 0; + break; + case TARGET_TYPE_QCA8074: + wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, + MON_BUF_MIN_ENTRIES); + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true); + soc->da_war_enabled = true; + soc->is_rx_fse_full_cache_invalidate_war_enabled = true; + break; + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6018: + 
wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, + MON_BUF_MIN_ENTRIES); + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); + soc->hw_nac_monitor_support = 1; + soc->ast_override_support = 1; + soc->per_tid_basize_max_tid = 8; + soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; + soc->da_war_enabled = false; + soc->is_rx_fse_full_cache_invalidate_war_enabled = true; + break; + case TARGET_TYPE_QCN9000: + wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx, + MON_BUF_MIN_ENTRIES); + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCN9000); + soc->ast_override_support = 1; + soc->da_war_enabled = false; + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); + soc->hw_nac_monitor_support = 1; + soc->per_tid_basize_max_tid = 8; + soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; + soc->lmac_polled_mode = 0; + soc->wbm_release_desc_rx_sg_support = 1; + if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE)) + soc->full_mon_mode = true; + break; + case TARGET_TYPE_QCA5018: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + soc->ast_override_support = 1; + soc->da_war_enabled = false; + wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false); + soc->hw_nac_monitor_support = 1; + soc->per_tid_basize_max_tid = 8; + soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS; + break; + default: + qdf_print("%s: Unknown tgt type %d\n", __func__, target_type); + qdf_assert_always(0); + break; + } +} + +/** + * dp_soc_cfg_attach() - set target specific configuration in + * dp soc cfg. 
+ * @soc: dp soc handle + */ +static void dp_soc_cfg_attach(struct dp_soc *soc) +{ + int target_type; + int nss_cfg = 0; + + target_type = hal_get_target_type(soc->hal_soc); + switch (target_type) { + case TARGET_TYPE_QCA6290: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA6290); + break; + case TARGET_TYPE_QCA6390: + case TARGET_TYPE_QCA6490: + case TARGET_TYPE_QCA6750: + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA6290); + soc->wlan_cfg_ctx->rxdma1_enable = 0; + break; + case TARGET_TYPE_QCA8074: + wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1); + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + break; + case TARGET_TYPE_QCA8074V2: + case TARGET_TYPE_QCA6018: + wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1); + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCA8074); + break; + case TARGET_TYPE_QCN9000: + wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1); + wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx, + REO_DST_RING_SIZE_QCN9000); + break; + default: + qdf_print("%s: Unknown tgt type %d\n", __func__, target_type); + qdf_assert_always(0); + break; + } + + if (soc->cdp_soc.ol_ops->get_soc_nss_cfg) + nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc); + + wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg); + + if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { + wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0); + wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0); + wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0); + wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0); + } +} + +static inline QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc, + HTC_HANDLE htc_handle, + qdf_device_t qdf_osdev, + uint8_t pdev_id) +{ + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + int nss_cfg; + void *sojourn_buf; + QDF_STATUS ret; + + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + struct dp_pdev *pdev = 
soc->pdev_list[pdev_id]; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + pdev->soc = soc; + pdev->pdev_id = pdev_id; + + pdev->filter = dp_mon_filter_alloc(pdev); + if (!pdev->filter) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Memory allocation failed for monitor filters")); + ret = QDF_STATUS_E_NOMEM; + goto fail0; + } + + /* + * Variable to prevent double pdev deinitialization during + * radio detach execution .i.e. in the absence of any vdev. + */ + pdev->pdev_deinit = 0; + + if (dp_wdi_event_attach(pdev)) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "dp_wdi_evet_attach failed"); + goto fail1; + } + + if (dp_pdev_srng_init(pdev)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Failed to initialize pdev srng rings")); + goto fail2; + } + + /* Initialize descriptors in TCL Rings used by IPA */ + if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) + hal_tx_init_data_ring(soc->hal_soc, + soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng); + + /* + * Initialize command/credit ring descriptor + * Command/CREDIT ring also used for sending DATA cmds + */ + hal_tx_init_cmd_credit_ring(soc->hal_soc, + soc->tcl_cmd_credit_ring.hal_srng); + + dp_tx_pdev_init(pdev); + /* + * Variable to prevent double pdev deinitialization during + * radio detach execution .i.e. in the absence of any vdev. 
+ */ + pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer)); + + if (!pdev->invalid_peer) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Invalid peer memory allocation failed")); + goto fail3; + } + + /* + * set nss pdev config based on soc config + */ + nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx); + wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx, + (nss_cfg & (1 << pdev_id))); + + pdev->target_pdev_id = + dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id); + + if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB && + pdev->lmac_id == PHYB_2G_LMAC_ID) { + pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID; + } + + /* Reset the cpu ring map if radio is NSS offloaded */ + if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) { + dp_soc_reset_cpu_ring_map(soc); + dp_soc_reset_intr_mask(soc); + } + + TAILQ_INIT(&pdev->vdev_list); + qdf_spinlock_create(&pdev->vdev_list_lock); + pdev->vdev_count = 0; + + qdf_spinlock_create(&pdev->tx_mutex); + qdf_spinlock_create(&pdev->neighbour_peer_mutex); + TAILQ_INIT(&pdev->neighbour_peers_list); + pdev->neighbour_peers_added = false; + pdev->monitor_configured = false; + + DP_STATS_INIT(pdev); + + /* Monitor filter init */ + pdev->mon_filter_mode = MON_FILTER_ALL; + pdev->fp_mgmt_filter = FILTER_MGMT_ALL; + pdev->fp_ctrl_filter = FILTER_CTRL_ALL; + pdev->fp_data_filter = FILTER_DATA_ALL; + pdev->mo_mgmt_filter = FILTER_MGMT_ALL; + pdev->mo_ctrl_filter = FILTER_CTRL_ALL; + pdev->mo_data_filter = FILTER_DATA_ALL; + + dp_local_peer_id_pool_init(pdev); + + dp_dscp_tid_map_setup(pdev); + dp_pcp_tid_map_setup(pdev); + + /* set the reo destination during initialization */ + pdev->reo_dest = pdev->pdev_id + 1; + + /* + * initialize ppdu tlv list + */ + TAILQ_INIT(&pdev->ppdu_info_list); + pdev->tlv_count = 0; + pdev->list_depth = 0; + + qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats)); + + pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev, + sizeof(struct cdp_tx_sojourn_stats), 
0, 4,
+					   TRUE);
+
+	if (!pdev->sojourn_buf) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  FL("Failed to allocate sojourn buf"));
+		goto fail4;
+	}
+	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
+	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
+
+	/* initialize cal client timer */
+	dp_cal_client_attach(&pdev->cal_client_ctx,
+			     dp_pdev_to_cdp_pdev(pdev),
+			     pdev->soc->osdev,
+			     &dp_iterate_update_peer_list);
+	qdf_event_create(&pdev->fw_peer_stats_event);
+
+	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
+	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
+		goto fail5;
+
+	if (dp_rxdma_ring_setup(soc, pdev)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  FL("RXDMA ring config failed"));
+		goto fail6;
+	}
+
+	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
+		goto fail7;
+
+	if (dp_ipa_ring_resource_setup(soc, pdev))
+		goto fail8;
+
+	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  FL("dp_ipa_uc_attach failed"));
+		goto fail8;
+	}
+
+	ret = dp_rx_fst_attach(soc, pdev);
+	if ((ret != QDF_STATUS_SUCCESS) &&
+	    (ret != QDF_STATUS_E_NOSUPPORT)) {
+		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
+			  "RX Flow Search Table attach failed: pdev %d err %d",
+			  pdev_id, ret);
+		goto fail9;
+	}
+
+	/* initialize sw rx descriptors */
+	dp_rx_pdev_desc_pool_init(pdev);
+	/* initialize sw monitor rx descriptors */
+	dp_rx_pdev_mon_desc_pool_init(pdev);
+	/* allocate buffers and replenish the RxDMA ring */
+	dp_rx_pdev_buffers_alloc(pdev);
+	/* allocate buffers and replenish the monitor RxDMA ring */
+	dp_rx_pdev_mon_buffers_alloc(pdev);
+
+	dp_init_tso_stats(pdev);
+	dp_tx_ppdu_stats_attach(pdev);
+
+	return QDF_STATUS_SUCCESS;
+fail9:
+	dp_ipa_uc_detach(soc, pdev);
+fail8:
+	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
+fail7:
+	dp_rxdma_ring_cleanup(soc, pdev);
+fail6:
+	dp_htt_ppdu_stats_detach(pdev);
+fail5:
+	qdf_nbuf_free(pdev->sojourn_buf);
+fail4:
+	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
+	qdf_spinlock_destroy(&pdev->tx_mutex);
+	qdf_spinlock_destroy(&pdev->vdev_list_lock);
+	qdf_mem_free(pdev->invalid_peer);
+fail3:
+	dp_pdev_srng_deinit(pdev);
+fail2:
+	dp_wdi_event_detach(pdev);
+fail1:
+	dp_mon_filter_dealloc(pdev);
+fail0:
+	return QDF_STATUS_E_FAILURE;
+}
+
+/*
+ * dp_pdev_init_wifi3() - Init txrx pdev
+ * @txrx_soc: opaque DP soc handle
+ * @htc_handle: HTC handle for host-target interface
+ * @qdf_osdev: QDF OS device
+ * @pdev_id: pdev id
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
+				     HTC_HANDLE htc_handle,
+				     qdf_device_t qdf_osdev,
+				     uint8_t pdev_id)
+{
+	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
+}
+
diff --git a/dp/wifi3.0/dp_rx.c b/dp/wifi3.0/dp_rx.c
index 6fafbb217c..29e0a5bec4 100644
--- a/dp/wifi3.0/dp_rx.c
+++ b/dp/wifi3.0/dp_rx.c
@@ -2488,21 +2488,6 @@ QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
 	return QDF_STATUS_SUCCESS;
 }
 
-/**
- * dp_rx_pdev_detach() - detach dp rx
- * @pdev: core txrx pdev context
- *
- * This function will detach DP RX into main device context
- * will free DP Rx resources.
- * - * Return: void - */ -void -dp_rx_pdev_detach(struct dp_pdev *pdev) -{ - dp_rx_pdev_desc_pool_free(pdev); -} - static QDF_STATUS dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf, struct dp_pdev *dp_pdev, @@ -2566,6 +2551,8 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, int page_idx, total_pages; union dp_rx_desc_list_elem_t *desc_list = NULL; union dp_rx_desc_list_elem_t *tail = NULL; + int sync_hw_ptr = 1; + uint32_t num_entries_avail; if (qdf_unlikely(!rxdma_srng)) { DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers); @@ -2574,6 +2561,20 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, dp_debug("requested %u RX buffers for driver attach", num_req_buffers); + hal_srng_access_start(dp_soc->hal_soc, rxdma_srng); + num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc, + rxdma_srng, + sync_hw_ptr); + hal_srng_access_end(dp_soc->hal_soc, rxdma_srng); + + if (!num_entries_avail) { + dp_err("Num of available entries is zero, nothing to do"); + return QDF_STATUS_E_NOMEM; + } + + if (num_entries_avail < num_req_buffers) + num_req_buffers = num_entries_avail; + nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool, num_req_buffers, &desc_list, &tail); if (!nr_descs) { @@ -2697,7 +2698,7 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, "nss-wifi<4> skip Rx refil %d", mac_for_pdev); - status = QDF_STATUS_SUCCESS; + return status; } dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev]; @@ -2712,10 +2713,6 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) if (status != QDF_STATUS_SUCCESS) return status; - rx_desc_pool->owner = DP_WBM2SW_RBM; - rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; - rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; - return status; } @@ -2770,6 +2767,10 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) rx_sw_desc_weight = 
wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx); + rx_desc_pool->owner = DP_WBM2SW_RBM; + rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; + rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; + dp_rx_desc_pool_init(soc, mac_for_pdev, rx_sw_desc_weight * rxdma_entries, rx_desc_pool); @@ -2794,50 +2795,6 @@ void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) dp_rx_desc_pool_deinit(soc, rx_desc_pool); } -/** - * dp_rx_attach() - attach DP RX - * @pdev: core txrx pdev context - * - * This function will attach a DP RX instance into the main - * device (SOC) context. Will allocate dp rx resource and - * initialize resources. - * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ -QDF_STATUS -dp_rx_pdev_attach(struct dp_pdev *pdev) -{ - struct dp_soc *soc = pdev->soc; - QDF_STATUS ret_val; - - if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "nss-wifi<4> skip Rx refil %d", - pdev->pdev_id); - return QDF_STATUS_SUCCESS; - } - - if (!dp_is_soc_reinit(soc)) { - ret_val = dp_rx_pdev_desc_pool_alloc(pdev); - if (ret_val != QDF_STATUS_SUCCESS) - return ret_val; - } - - dp_rx_pdev_desc_pool_init(pdev); - - ret_val = dp_rx_fst_attach(soc, pdev); - if ((ret_val != QDF_STATUS_SUCCESS) && - (ret_val != QDF_STATUS_E_NOSUPPORT)) { - QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR, - "RX Flow Search Table attach failed: pdev %d err %d", - pdev->pdev_id, ret_val); - return ret_val; - } - - return dp_rx_pdev_buffers_alloc(pdev); -} - /* * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring * diff --git a/dp/wifi3.0/dp_rx.h b/dp/wifi3.0/dp_rx.h index c617cd95f4..d89c0095d2 100644 --- a/dp/wifi3.0/dp_rx.h +++ b/dp/wifi3.0/dp_rx.h @@ -510,6 +510,7 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id, uint32_t pool_size, struct rx_desc_pool *rx_desc_pool); +void 
dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id); void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, union dp_rx_desc_list_elem_t **local_desc_list, diff --git a/dp/wifi3.0/dp_rx_desc.c b/dp/wifi3.0/dp_rx_desc.c index 210ab57da2..e97876232b 100644 --- a/dp/wifi3.0/dp_rx_desc.c +++ b/dp/wifi3.0/dp_rx_desc.c @@ -31,7 +31,7 @@ A_COMPILE_TIME_ASSERT(cookie_size_check, * * @rx_desc_pool: rx descriptor pool pointer * Return: QDF_STATUS QDF_STATUS_SUCCESS - * QDF_STATUS_E_NOMEM + * QDF_STATUS_E_NOMEM */ QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool) { @@ -233,7 +233,7 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc, * @rx_desc_pool: rx descriptor pool pointer * * Return: QDF_STATUS QDF_STATUS_SUCCESS - * QDF_STATUS_E_NOMEM + * QDF_STATUS_E_NOMEM */ QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool) { @@ -377,6 +377,7 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc, } #endif /* RX_DESC_MULTI_PAGE_ALLOC */ + /* * dp_rx_get_free_desc_list() - provide a list of descriptors from * the free rx desc pool. diff --git a/dp/wifi3.0/dp_rx_mon.h b/dp/wifi3.0/dp_rx_mon.h index 4faec924ca..0c6d6168aa 100644 --- a/dp/wifi3.0/dp_rx_mon.h +++ b/dp/wifi3.0/dp_rx_mon.h @@ -19,6 +19,8 @@ #ifndef _DP_RX_MON_H_ #define _DP_RX_MON_H_ +#define MON_BUF_MIN_ENTRIES 8 + /* * dp_rx_mon_status_process() - Process monitor status ring and *>.....TLV in status ring. 
@@ -47,10 +49,28 @@ dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota); void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota); -QDF_STATUS dp_rx_pdev_mon_attach(struct dp_pdev *pdev); -QDF_STATUS dp_rx_pdev_mon_detach(struct dp_pdev *pdev); -QDF_STATUS dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int mac_id); -QDF_STATUS dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id); +QDF_STATUS dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev); +QDF_STATUS dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev); +void dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev); +void dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev); +void dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev); +void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev); +void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id); + +QDF_STATUS dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, + uint32_t mac_id); +QDF_STATUS dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, + uint32_t mac_id); +void dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, + uint32_t mac_id); +void dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, + uint32_t mac_id); +void dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, + uint32_t mac_id); +void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id); +QDF_STATUS +dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id, + bool delayed_replenish); #ifdef QCA_SUPPORT_FULL_MON diff --git a/dp/wifi3.0/dp_rx_mon_dest.c b/dp/wifi3.0/dp_rx_mon_dest.c index c3cb8bfaaf..4aec2cc2c5 100644 --- a/dp/wifi3.0/dp_rx_mon_dest.c +++ b/dp/wifi3.0/dp_rx_mon_dest.c @@ -1092,9 +1092,9 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) qdf_mem_zero(&(pdev->ppdu_info.rx_status), sizeof(pdev->ppdu_info.rx_status)); QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG, - "%s %d ppdu_id %x != 
ppdu_info.com_info .ppdu_id %x", - __func__, __LINE__, - ppdu_id, pdev->ppdu_info.com_info.ppdu_id); + "%s %d ppdu_id %x != ppdu_info.com_info.ppdu_id %x", + __func__, __LINE__, + ppdu_id, pdev->ppdu_info.com_info.ppdu_id); break; } @@ -1121,27 +1121,208 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) } } -#ifndef DISABLE_MON_CONFIG -#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \ - !defined(QCA_WIFI_QCA6750) -/** - * dp_rx_pdev_mon_buf_attach() - Allocate the monitor descriptor pool - * - * @pdev: physical device handle - * @mac_id: mac id - * - * Return: QDF_STATUS - */ -#define MON_BUF_MIN_ALLOC_ENTRIES 128 -static QDF_STATUS -dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) { +QDF_STATUS +dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id, + bool delayed_replenish) +{ uint8_t pdev_id = pdev->pdev_id; struct dp_soc *soc = pdev->soc; struct dp_srng *mon_buf_ring; uint32_t num_entries; struct rx_desc_pool *rx_desc_pool; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; + + mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id]; + + num_entries = mon_buf_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries); + + /* Replenish RXDMA monitor buffer ring with 8 buffers only + * delayed_replenish_entries is actually 8 but when we call + * dp_pdev_rx_buffers_attach() we pass 1 less than 8, hence + * added 1 to delayed_replenish_entries to ensure we have 8 + * entries. Once the monitor VAP is configured we replenish + * the complete RXDMA monitor buffer ring. 
+ */ + if (delayed_replenish) + num_entries = soc_cfg_ctx->delayed_replenish_entries + 1; + else + num_entries -= soc_cfg_ctx->delayed_replenish_entries; + + return dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring, + rx_desc_pool, num_entries - 1); +} + +static QDF_STATUS +dp_rx_pdev_mon_cmn_buffers_alloc(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + uint8_t pdev_id = pdev->pdev_id; + int mac_for_pdev; + bool delayed_replenish; QDF_STATUS status = QDF_STATUS_SUCCESS; - uint32_t rx_desc_pool_size, replenish_size; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; + + delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0; + mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id); + status = dp_rx_pdev_mon_status_buffers_alloc(pdev, mac_for_pdev); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_rx_pdev_mon_status_desc_pool_alloc() failed", + __func__); + goto fail; + } + + if (!soc->wlan_cfg_ctx->rxdma1_enable) + return status; + + status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev, + delayed_replenish); + if (!QDF_IS_STATUS_SUCCESS(status)) { + dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n", + __func__); + goto mon_stat_buf_dealloc; + } + + return status; + +mon_stat_buf_dealloc: + dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev); +fail: + return status; +} + +static void +dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng *mon_buf_ring; + uint32_t num_entries; + struct rx_desc_pool *rx_desc_pool; + uint32_t rx_desc_pool_size; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; + + mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id]; + + num_entries = mon_buf_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + dp_debug("Mon RX Desc buf Pool[%d] init entries=%u", + pdev_id, num_entries); + + rx_desc_pool_size = 
wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) * + num_entries; + + rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM; + rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; + rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; + + dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool); + + pdev->mon_last_linkdesc_paddr = 0; + + pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; + qdf_spinlock_create(&pdev->mon_lock); +} + +static void +dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + uint32_t mac_for_pdev; + + mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id); + dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev); + + if (!soc->wlan_cfg_ctx->rxdma1_enable) + return; + + dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev); + dp_link_desc_ring_replenish(soc, mac_for_pdev); +} + +static void +dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id); + + dp_rx_desc_pool_deinit(soc, rx_desc_pool); + qdf_spinlock_destroy(&pdev->mon_lock); +} + +static void +dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + uint8_t pdev_id = pdev->pdev_id; + int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + + dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev); + if (!soc->wlan_cfg_ctx->rxdma1_enable) + return; + + dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev); +} + +static void +dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id); + + 
dp_rx_desc_pool_free(soc, rx_desc_pool); +} + +static void +dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id) +{ + struct dp_soc *soc = pdev->soc; + uint8_t pdev_id = pdev->pdev_id; + int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + + dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); + dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev); + dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev); +} + +void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_mon[mac_id]; + + dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id); + + dp_rx_desc_nbuf_free(soc, rx_desc_pool); +} + +static QDF_STATUS +dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng *mon_buf_ring; + uint32_t num_entries; + struct rx_desc_pool *rx_desc_pool; + uint32_t rx_desc_pool_size; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx; mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id]; @@ -1151,320 +1332,147 @@ dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) { dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries); - rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx) * num_entries; - if (!dp_is_soc_reinit(soc)) { - status = dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, - rx_desc_pool); - if (!QDF_IS_STATUS_SUCCESS(status)) - return status; - } + rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) * + num_entries; - rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM; - rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE; - rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT; - - replenish_size = ((num_entries - 1) < MON_BUF_MIN_ALLOC_ENTRIES) ? 
- (num_entries - 1) : MON_BUF_MIN_ALLOC_ENTRIES; - - dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool); - - status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring, - rx_desc_pool, replenish_size); - - return status; + return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool); } static QDF_STATUS -dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id) +dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id) { struct dp_soc *soc = pdev->soc; - struct rx_desc_pool *rx_desc_pool; - - rx_desc_pool = &soc->rx_desc_mon[mac_id]; - if (rx_desc_pool->pool_size != 0) { - if (!dp_is_soc_reinit(soc)) - dp_rx_desc_nbuf_and_pool_free(soc, mac_id, - rx_desc_pool); - else - dp_rx_desc_nbuf_free(soc, rx_desc_pool); - } - - return QDF_STATUS_SUCCESS; -} - -/** - * dp_mon_link_desc_pool_setup(): Allocate and setup link descriptor pool - * that will be used by HW for various link - * and queue descriptorsand managed by WBM - * - * @soc: soc handle - * @mac_id: mac id - * - * Return: QDF_STATUS - */ -static -QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t lmac_id) -{ - if (!dp_is_soc_reinit(soc)) - if (dp_hw_link_desc_pool_banks_alloc(soc, lmac_id)) - return QDF_STATUS_E_FAILURE; - - dp_link_desc_ring_replenish(soc, lmac_id); - - return QDF_STATUS_SUCCESS; -} - -/* - * Free link descriptor pool that was setup HW - */ -static -void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id) -{ - dp_hw_link_desc_pool_banks_free(soc, mac_id); -} - -/** - * dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf - * @pdev: DP pdev object - * - * Return: None - */ -void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev) -{ - struct dp_soc *soc; + uint8_t pdev_id = pdev->pdev_id; uint32_t mac_for_pdev; - union dp_rx_desc_list_elem_t *tail = NULL; - union dp_rx_desc_list_elem_t *desc_list = NULL; - uint32_t num_entries; - uint32_t id; - - soc = pdev->soc; - num_entries = 
wlan_cfg_get_dma_mon_buf_ring_size(pdev->wlan_cfg_ctx); - - for (id = 0; id < NUM_RXDMA_RINGS_PER_PDEV; id++) { - /* - * Get mac_for_pdev appropriately for both MCL & WIN, - * since MCL have multiple mon buf rings and WIN just - * has one mon buffer ring mapped per pdev, below API - * helps identify accurate buffer_ring for both cases - * - */ - mac_for_pdev = - dp_get_lmac_id_for_pdev_id(soc, id, pdev->pdev_id); - - dp_rx_buffers_replenish(soc, mac_for_pdev, - dp_rxdma_get_mon_buf_ring(pdev, - mac_for_pdev), - dp_rx_get_mon_desc_pool(soc, - mac_for_pdev, - pdev->pdev_id), - num_entries, &desc_list, &tail); - } -} -#else -static -QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id) -{ - return QDF_STATUS_SUCCESS; -} - -static QDF_STATUS -dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) -{ - return QDF_STATUS_SUCCESS; -} - -static -void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id) -{ -} - -static QDF_STATUS -dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id) -{ - return QDF_STATUS_SUCCESS; -} - -void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev) -{} -#endif - -/** - * dp_rx_pdev_mon_cmn_detach() - detach dp rx for monitor mode - * @pdev: core txrx pdev context - * @mac_id: mac_id for which deinit is to be done - * - * This function will free DP Rx resources for - * monitor mode - * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ -static QDF_STATUS -dp_rx_pdev_mon_cmn_detach(struct dp_pdev *pdev, int mac_id) { - struct dp_soc *soc = pdev->soc; - uint8_t pdev_id = pdev->pdev_id; - int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); - - dp_mon_link_desc_pool_cleanup(soc, lmac_id); - dp_rx_pdev_mon_status_detach(pdev, lmac_id); - dp_rx_pdev_mon_buf_detach(pdev, lmac_id); - - return QDF_STATUS_SUCCESS; -} - -/** - * dp_rx_pdev_mon_cmn_attach() - attach DP RX for monitor mode - * @pdev: core txrx pdev context - * @mac_id: mac_id for which init is to be 
done - * - * This function Will allocate dp rx resource and - * initialize resources for monitor mode. - * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ -static QDF_STATUS -dp_rx_pdev_mon_cmn_attach(struct dp_pdev *pdev, int mac_id) { - struct dp_soc *soc = pdev->soc; - uint8_t pdev_id = pdev->pdev_id; - int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); QDF_STATUS status; - status = dp_rx_pdev_mon_buf_attach(pdev, lmac_id); + mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); + + /* Allocate sw rx descriptor pool for monitor status ring */ + status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev); if (!QDF_IS_STATUS_SUCCESS(status)) { - dp_err("%s: dp_rx_pdev_mon_buf_attach() failed\n", __func__); + dp_err("%s: dp_rx_pdev_mon_status_desc_pool_alloc() failed", + __func__); goto fail; } - status = dp_rx_pdev_mon_status_attach(pdev, lmac_id); + if (!soc->wlan_cfg_ctx->rxdma1_enable) + return status; + + /* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */ + status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev); if (!QDF_IS_STATUS_SUCCESS(status)) { - dp_err("%s: dp_rx_pdev_mon_status_attach() failed", __func__); - goto mon_buf_detach; + dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n", + __func__); + goto mon_status_dealloc; } - status = dp_mon_link_desc_pool_setup(soc, lmac_id); + /* Allocate link descriptors for the monitor link descriptor ring */ + status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev); if (!QDF_IS_STATUS_SUCCESS(status)) { - dp_err("%s: dp_mon_link_desc_pool_setup() failed", __func__); - goto mon_status_detach; + dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed", + __func__); + goto mon_buf_dealloc; } - return status; -mon_status_detach: - dp_rx_pdev_mon_status_detach(pdev, lmac_id); - -mon_buf_detach: - dp_rx_pdev_mon_buf_detach(pdev, lmac_id); - +mon_buf_dealloc: + dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev); 
+mon_status_dealloc: + dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev); fail: return status; } -/** - * dp_rx_pdev_mon_attach() - attach DP RX for monitor mode - * @pdev: core txrx pdev context - * - * This function will attach a DP RX for monitor mode instance into - * the main device (SOC) context. Will allocate dp rx resource and - * initialize resources. - * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ -QDF_STATUS -dp_rx_pdev_mon_attach(struct dp_pdev *pdev) { - QDF_STATUS status; - uint8_t pdev_id = pdev->pdev_id; - int mac_id; - - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN, - "%s: pdev attach id=%d", __func__, pdev_id); - - for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - status = dp_rx_pdev_mon_cmn_attach(pdev, mac_id); - if (!QDF_IS_STATUS_SUCCESS(status)) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - "%s: dp_rx_pdev_mon_cmn_attach(%d) failed\n", - __func__, mac_id); - goto fail; - } - } - pdev->mon_last_linkdesc_paddr = 0; - pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1; - qdf_spinlock_create(&pdev->mon_lock); - - /* Attach full monitor mode resources */ - dp_full_mon_attach(pdev); - return QDF_STATUS_SUCCESS; - -fail: - for (mac_id = mac_id - 1; mac_id >= 0; mac_id--) - dp_rx_pdev_mon_cmn_detach(pdev, mac_id); - - return status; -} - -QDF_STATUS -dp_mon_link_free(struct dp_pdev *pdev) { +static void +dp_rx_pdev_mon_cmn_buffers_free(struct dp_pdev *pdev, int mac_id) +{ uint8_t pdev_id = pdev->pdev_id; struct dp_soc *soc = pdev->soc; - int mac_id, lmac_id; + int mac_for_pdev; + + mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id); + dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev); + + if (!soc->wlan_cfg_ctx->rxdma1_enable) + return; + + dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev); +} + +QDF_STATUS +dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev) +{ + QDF_STATUS status; + int mac_id, count; for (mac_id = 0; mac_id < 
NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id); - dp_mon_link_desc_pool_cleanup(soc, lmac_id); - } + status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, "%s: %d failed\n", + __func__, mac_id); - return QDF_STATUS_SUCCESS; + for (count = 0; count < mac_id; count++) + dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count); + + return status; + } + } + return status; } -/** - * dp_rx_pdev_mon_detach() - detach dp rx for monitor mode - * @pdev: core txrx pdev context - * - * This function will detach DP RX for monitor mode from - * main device context. will free DP Rx resources for - * monitor mode - * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ -QDF_STATUS -dp_rx_pdev_mon_detach(struct dp_pdev *pdev) { - uint8_t pdev_id = pdev->pdev_id; + +void +dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev) +{ int mac_id; - qdf_spinlock_destroy(&pdev->mon_lock); + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) + dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id); +} + +void +dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev) +{ + int mac_id; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) + dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id); +} + +void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev) +{ + int mac_id; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) + dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id); +} + +void +dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev) +{ + int mac_id; + + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) + dp_rx_pdev_mon_cmn_buffers_free(pdev, mac_id); +} + +QDF_STATUS +dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev) +{ + int mac_id; + QDF_STATUS status; + for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) { - int mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, - mac_id, 
pdev_id); - - dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev); - dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev); + status = dp_rx_pdev_mon_cmn_buffers_alloc(pdev, mac_id); + if (!QDF_IS_STATUS_SUCCESS(status)) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, "%s: %d failed\n", + __func__, mac_id); + return status; + } } - /* Detach full monitor mode resources */ - dp_full_mon_detach(pdev); - - return QDF_STATUS_SUCCESS; -} -#else -QDF_STATUS -dp_rx_pdev_mon_attach(struct dp_pdev *pdev) { - return QDF_STATUS_SUCCESS; + return status; } -QDF_STATUS -dp_rx_pdev_mon_detach(struct dp_pdev *pdev) { - return QDF_STATUS_SUCCESS; -} - -QDF_STATUS -dp_mon_link_free(struct dp_pdev *pdev) { - return QDF_STATUS_SUCCESS; -} - -void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev) -{} -#endif /* DISABLE_MON_CONFIG */ diff --git a/dp/wifi3.0/dp_rx_mon_status.c b/dp/wifi3.0/dp_rx_mon_status.c index 3fe0240398..2cce5925e1 100644 --- a/dp/wifi3.0/dp_rx_mon_status.c +++ b/dp/wifi3.0/dp_rx_mon_status.c @@ -34,6 +34,16 @@ #include "dp_ratetable.h" #endif +static inline +QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc, + uint32_t mac_id, + struct dp_srng *dp_rxdma_srng, + struct rx_desc_pool *rx_desc_pool, + uint32_t num_req_buffers, + union dp_rx_desc_list_elem_t **desc_list, + union dp_rx_desc_list_elem_t **tail, + uint8_t owner); + static inline void dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev, struct hal_rx_ppdu_info *ppdu_info, @@ -1903,6 +1913,7 @@ done: return work_done; } + /* * dp_rx_mon_status_process() - Process monitor status ring and * TLV in status ring. @@ -1923,6 +1934,7 @@ dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) { return work_done; } + /** * dp_mon_process() - Main monitor mode processing roution. 
* This call monitor status ring process then monitor @@ -1942,34 +1954,143 @@ dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) { return dp_rx_mon_status_process(soc, mac_id, quota); } -/** - * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring - * @pdev: core txrx pdev context - * @mac_id: mac_id/pdev_id correspondinggly for MCL and WIN - * - * This function will detach DP RX status ring from - * main device context. will free DP Rx resources for - * status ring - * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ QDF_STATUS -dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id) +dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id) { + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng *mon_status_ring; + uint32_t num_entries; + struct rx_desc_pool *rx_desc_pool; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + union dp_rx_desc_list_elem_t *desc_list = NULL; + union dp_rx_desc_list_elem_t *tail = NULL; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + mon_status_ring = &soc->rxdma_mon_status_ring[mac_id]; + + num_entries = mon_status_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + dp_debug("Mon RX Desc Pool[%d] entries=%u", + pdev_id, num_entries); + + return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring, + rx_desc_pool, num_entries, + &desc_list, &tail, + HAL_RX_BUF_RBM_SW3_BM); +} + +QDF_STATUS +dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng *mon_status_ring; + uint32_t num_entries; + struct rx_desc_pool *rx_desc_pool; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + mon_status_ring = &soc->rxdma_mon_status_ring[mac_id]; + + num_entries = mon_status_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + dp_debug("Mon RX Desc Pool[%d] 
entries=%u", pdev_id, num_entries); + + return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool); +} + +void +dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint32_t i; + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct dp_srng *mon_status_ring; + uint32_t num_entries; + struct rx_desc_pool *rx_desc_pool; + struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx; + + soc_cfg_ctx = soc->wlan_cfg_ctx; + mon_status_ring = &soc->rxdma_mon_status_ring[mac_id]; + + num_entries = mon_status_ring->num_entries; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + dp_debug("Mon RX Desc status Pool[%d] init entries=%u", + pdev_id, num_entries); + + rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM; + rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; + rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; + + dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool); + + qdf_nbuf_queue_init(&pdev->rx_status_q); + + pdev->mon_ppdu_status = DP_PPDU_STATUS_START; + + qdf_mem_zero(&pdev->ppdu_info.rx_status, + sizeof(pdev->ppdu_info.rx_status)); + + qdf_mem_zero(&pdev->rx_mon_stats, sizeof(pdev->rx_mon_stats)); + + dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info, + &pdev->rx_mon_stats); + + for (i = 0; i < MAX_MU_USERS; i++) { + qdf_nbuf_queue_init(&pdev->mpdu_q[i]); + pdev->is_mpdu_hdr[i] = true; + } + + qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list[MAX_MU_USERS])); + + pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED; +} + +void +dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id) { + uint8_t pdev_id = pdev->pdev_id; struct dp_soc *soc = pdev->soc; struct rx_desc_pool *rx_desc_pool; rx_desc_pool = &soc->rx_desc_status[mac_id]; - if (rx_desc_pool->pool_size != 0) { - if (!dp_is_soc_reinit(soc)) - dp_rx_desc_nbuf_and_pool_free(soc, mac_id, - rx_desc_pool); - else - dp_rx_desc_nbuf_free(soc, rx_desc_pool); - } - return QDF_STATUS_SUCCESS; + dp_debug("Mon RX Desc status Pool[%d] deinit", 
pdev_id); + + dp_rx_desc_pool_deinit(soc, rx_desc_pool); +} + +void +dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id) { + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id); + + dp_rx_desc_pool_free(soc, rx_desc_pool); +} + +void +dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id) +{ + uint8_t pdev_id = pdev->pdev_id; + struct dp_soc *soc = pdev->soc; + struct rx_desc_pool *rx_desc_pool; + + rx_desc_pool = &soc->rx_desc_status[mac_id]; + + dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id); + + dp_rx_desc_nbuf_free(soc, rx_desc_pool); } /* @@ -2132,80 +2253,3 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc, return QDF_STATUS_SUCCESS; } -/** - * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring - * @pdev: core txrx pdev context - * @ring_id: ring number - * This function will attach a DP RX monitor status ring into pDEV - * and replenish monitor status ring with buffer. 
- * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return - */ -QDF_STATUS -dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) { - struct dp_soc *soc = pdev->soc; - union dp_rx_desc_list_elem_t *desc_list = NULL; - union dp_rx_desc_list_elem_t *tail = NULL; - struct dp_srng *mon_status_ring; - uint32_t num_entries; - uint32_t i; - struct rx_desc_pool *rx_desc_pool; - QDF_STATUS status; - - mon_status_ring = &soc->rxdma_mon_status_ring[ring_id]; - - num_entries = mon_status_ring->num_entries; - - rx_desc_pool = &soc->rx_desc_status[ring_id]; - - dp_info("Mon RX Status Pool[%d] entries=%d", - ring_id, num_entries); - - if (!dp_is_soc_reinit(soc)) { - status = dp_rx_desc_pool_alloc(soc, num_entries + 1, - rx_desc_pool); - if (!QDF_IS_STATUS_SUCCESS(status)) - return status; - } - - dp_rx_desc_pool_init(soc, ring_id, num_entries + 1, rx_desc_pool); - - rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE; - rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT; - - dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id); - - status = dp_rx_mon_status_buffers_replenish(soc, ring_id, - mon_status_ring, - rx_desc_pool, - num_entries, - &desc_list, &tail, - HAL_RX_BUF_RBM_SW3_BM); - - if (!QDF_IS_STATUS_SUCCESS(status)) - return status; - - qdf_nbuf_queue_init(&pdev->rx_status_q); - - pdev->mon_ppdu_status = DP_PPDU_STATUS_START; - - qdf_mem_zero(&(pdev->ppdu_info.rx_status), - sizeof(pdev->ppdu_info.rx_status)); - - qdf_mem_zero(&pdev->rx_mon_stats, - sizeof(pdev->rx_mon_stats)); - - dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info, - &pdev->rx_mon_stats); - - for (i = 0; i < MAX_MU_USERS; i++) { - qdf_nbuf_queue_init(&pdev->mpdu_q[i]); - pdev->is_mpdu_hdr[i] = true; - } - qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list[MAX_MU_USERS])); - - pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED; - - return QDF_STATUS_SUCCESS; -} diff --git a/dp/wifi3.0/dp_tx.c b/dp/wifi3.0/dp_tx.c index 61ad121816..94a4462374 100644 --- 
a/dp/wifi3.0/dp_tx.c +++ b/dp/wifi3.0/dp_tx.c @@ -4205,7 +4205,7 @@ QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev) * Return: QDF_STATUS_SUCCESS: success * QDF_STATUS_E_RESOURCES: Error return */ -QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev) +QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev) { struct dp_soc *soc = pdev->soc; @@ -4239,8 +4239,8 @@ QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev) #ifdef QCA_LL_TX_FLOW_CONTROL_V2 /* Pools will be allocated dynamically */ -static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, - int num_desc) +static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, + int num_desc) { uint8_t i; @@ -4249,7 +4249,17 @@ static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, soc->tx_desc[i].status = FLOW_POOL_INACTIVE; } - return 0; + return QDF_STATUS_SUCCESS; +} + +static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool, + int num_desc) +{ + return QDF_STATUS_SUCCESS; +} + +static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool) +{ } static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) @@ -4260,105 +4270,127 @@ static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock); } #else /* QCA_LL_TX_FLOW_CONTROL_V2! 
*/ -static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, - int num_desc) +static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool, + int num_desc) { - uint8_t i; + uint8_t i, count; /* Allocate software Tx descriptor pools */ for (i = 0; i < num_pool; i++) { if (dp_tx_desc_pool_alloc(soc, i, num_desc)) { QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "%s Tx Desc Pool alloc %d failed %pK", - __func__, i, soc); - return ENOMEM; + FL("Tx Desc Pool alloc %d failed %pK"), + i, soc); + goto fail; } } - return 0; + return QDF_STATUS_SUCCESS; + +fail: + for (count = 0; count < i; count++) + dp_tx_desc_pool_free(soc, count); + + return QDF_STATUS_E_NOMEM; +} + +static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool, + int num_desc) +{ + uint8_t i; + for (i = 0; i < num_pool; i++) { + if (dp_tx_desc_pool_init(soc, i, num_desc)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + FL("Tx Desc Pool init %d failed %pK"), + i, soc); + return QDF_STATUS_E_NOMEM; + } + } + return QDF_STATUS_SUCCESS; +} + +static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool) +{ + uint8_t i; + + for (i = 0; i < num_pool; i++) + dp_tx_desc_pool_deinit(soc, i); } static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool) { uint8_t i; - for (i = 0; i < num_pool; i++) { - qdf_assert_always(!soc->tx_desc[i].num_allocated); - if (dp_tx_desc_pool_free(soc, i)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "%s Tx Desc Pool Free failed", __func__); - } - } + for (i = 0; i < num_pool; i++) + dp_tx_desc_pool_free(soc, i); } #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ -#ifndef QCA_MEM_ATTACH_ON_WIFI3 /** - * dp_tso_attach_wifi3() - TSO attach handler - * @txrx_soc: Opaque Dp handle + * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors + * @soc: core txrx main context + * @num_pool: number of pools * - * Reserve TSO descriptor buffers - * - * Return: QDF_STATUS_E_FAILURE on failure or - * 
QDF_STATUS_SUCCESS on success */ -static -QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc) +void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool) { - return dp_tso_soc_attach(txrx_soc); + dp_tx_tso_desc_pool_deinit(soc, num_pool); + dp_tx_tso_num_seg_pool_deinit(soc, num_pool); } /** - * dp_tso_detach_wifi3() - TSO Detach handler - * @txrx_soc: Opaque Dp handle + * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors + * @soc: core txrx main context + * @num_pool: number of pools * - * Deallocate TSO descriptor buffers - * - * Return: QDF_STATUS_E_FAILURE on failure or - * QDF_STATUS_SUCCESS on success */ -static -QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc) +void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool) { - return dp_tso_soc_detach(txrx_soc); -} -#else -static -QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc) -{ - return QDF_STATUS_SUCCESS; + dp_tx_tso_desc_pool_free(soc, num_pool); + dp_tx_tso_num_seg_pool_free(soc, num_pool); } -static -QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc) +/** + * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors + * @soc: core txrx main context + * + * This function frees all tx related descriptors as below + * 1. Regular TX descriptors (static pools) + * 2. extension TX descriptors (used for ME, RAW, TSO etc...) + * 3. 
TSO descriptors + * + */ +void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc) { - return QDF_STATUS_SUCCESS; -} -#endif - -QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc) -{ - struct dp_soc *soc = (struct dp_soc *)txrx_soc; - uint8_t i; uint8_t num_pool; - uint32_t num_desc; num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); - num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); - for (i = 0; i < num_pool; i++) - dp_tx_tso_desc_pool_free(soc, i); + dp_tx_tso_cmn_desc_pool_free(soc, num_pool); + dp_tx_ext_desc_pool_free(soc, num_pool); + dp_tx_delete_static_pools(soc, num_pool); +} - dp_info("%s TSO Desc Pool %d Free descs = %d", - __func__, num_pool, num_desc); +/** + * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors + * @soc: core txrx main context + * + * This function de-initializes all tx related descriptors as below + * 1. Regular TX descriptors (static pools) + * 2. extension TX descriptors (used for ME, RAW, TSO etc...) + * 3. TSO descriptors + * + */ +void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc) +{ + uint8_t num_pool; - for (i = 0; i < num_pool; i++) - dp_tx_tso_num_seg_pool_free(soc, i); + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); - dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d", - __func__, num_pool, num_desc); - - return QDF_STATUS_SUCCESS; + dp_tx_flow_control_deinit(soc); + dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool); + dp_tx_ext_desc_pool_deinit(soc, num_pool); + dp_tx_deinit_static_pools(soc, num_pool); } /** @@ -4370,105 +4402,69 @@ QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc) * Return: QDF_STATUS_E_FAILURE on failure or * QDF_STATUS_SUCCESS on success */ -QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc) +QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc, + uint8_t num_pool, + uint16_t num_desc) { - struct dp_soc *soc = (struct dp_soc *)txrx_soc; - uint8_t i; - uint8_t num_pool; - uint32_t num_desc; - - num_pool = 
wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); - num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); - - for (i = 0; i < num_pool; i++) { - if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) { - dp_err("TSO Desc Pool alloc %d failed %pK", - i, soc); - - return QDF_STATUS_E_FAILURE; - } + if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) { + dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc); + return QDF_STATUS_E_FAILURE; } - dp_info("%s TSO Desc Alloc %d, descs = %d", - __func__, num_pool, num_desc); - - for (i = 0; i < num_pool; i++) { - if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) { - dp_err("TSO Num of seg Pool alloc %d failed %pK", - i, soc); - - return QDF_STATUS_E_FAILURE; - } + if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) { + dp_err("TSO Num of seg Pool alloc %d failed %pK", + num_pool, soc); + return QDF_STATUS_E_FAILURE; } return QDF_STATUS_SUCCESS; } /** - * dp_tx_soc_detach() - detach soc from dp tx - * @soc: core txrx main context + * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init + * @soc: DP soc handle + * @num_pool: Number of pools + * @num_desc: Number of descriptors * - * This function will detach dp tx into main device context - * will free dp tx resource and initialize resources + * Initialize TSO descriptor pools * - * Return: QDF_STATUS_SUCCESS: success - * QDF_STATUS_E_RESOURCES: Error return + * Return: QDF_STATUS_E_FAILURE on failure or + * QDF_STATUS_SUCCESS on success */ -QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc) + +QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc, + uint8_t num_pool, + uint16_t num_desc) { - uint8_t num_pool; - uint16_t num_desc; - uint16_t num_ext_desc; - uint8_t i; - QDF_STATUS status = QDF_STATUS_SUCCESS; - - num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); - num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); - num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); - - dp_tx_flow_control_deinit(soc); - 
dp_tx_delete_static_pools(soc, num_pool); - - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "%s Tx Desc Pool Free num_pool = %d, descs = %d", - __func__, num_pool, num_desc); - - for (i = 0; i < num_pool; i++) { - if (dp_tx_ext_desc_pool_free(soc, i)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "%s Tx Ext Desc Pool Free failed", - __func__); - return QDF_STATUS_E_RESOURCES; - } + if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) { + dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc); + return QDF_STATUS_E_FAILURE; } - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "%s MSDU Ext Desc Pool %d Free descs = %d", - __func__, num_pool, num_ext_desc); - - status = dp_tso_detach_wifi3(soc); - if (status != QDF_STATUS_SUCCESS) - return status; - + if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) { + dp_err("TSO Num of seg Pool alloc %d failed %pK", + num_pool, soc); + return QDF_STATUS_E_FAILURE; + } return QDF_STATUS_SUCCESS; } /** - * dp_tx_soc_attach() - attach soc to dp tx + * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory * @soc: core txrx main context * - * This function will attach dp tx into main device context - * will allocate dp tx resource and initialize resources + * This function allocates memory for following descriptor pools + * 1. regular sw tx descriptor pools (static pools) + * 2. TX extension descriptor pools (ME, RAW, TSO etc...) + * 3. 
TSO descriptor pools * * Return: QDF_STATUS_SUCCESS: success * QDF_STATUS_E_RESOURCES: Error return */ -QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) +QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc) { - uint8_t i; uint8_t num_pool; uint32_t num_desc; uint32_t num_ext_desc; - QDF_STATUS status = QDF_STATUS_SUCCESS; num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); @@ -4480,68 +4476,117 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc) if ((num_pool > MAX_TXDESC_POOLS) || (num_desc > WLAN_CFG_NUM_TX_DESC_MAX)) - goto fail; + goto fail1; if (dp_tx_alloc_static_pools(soc, num_pool, num_desc)) - goto fail; + goto fail1; - dp_tx_flow_control_init(soc); + if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc)) + goto fail2; - /* Allocate extension tx descriptor pools */ - for (i = 0; i < num_pool; i++) { - if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "MSDU Ext Desc Pool alloc %d failed %pK", - i, soc); + if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; - goto fail; - } - } - - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "%s MSDU Ext Desc Alloc %d, descs = %d", - __func__, num_pool, num_ext_desc); - - status = dp_tso_attach_wifi3((void *)soc); - if (status != QDF_STATUS_SUCCESS) - goto fail; - - - /* Initialize descriptors in TCL Rings */ - if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { - for (i = 0; i < soc->num_tcl_data_rings; i++) { - hal_tx_init_data_ring(soc->hal_soc, - soc->tcl_data_ring[i].hal_srng); - } - if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) - hal_tx_init_data_ring(soc->hal_soc, - soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng); - } - - /* - * Initialize command/credit ring descriptor - * Command/CREDIT ring also used for sending DATA cmds - */ - hal_tx_init_cmd_credit_ring(soc->hal_soc, - soc->tcl_cmd_credit_ring.hal_srng); - - /* - * todo - Add a 
runtime config option to enable this. - */ - /* - * Due to multiple issues on NPR EMU, enable it selectively - * only for NPR EMU, should be removed, once NPR platforms - * are stable. - */ - soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; - - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO, - "%s HAL Tx init Success", __func__); + if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc)) + goto fail3; return QDF_STATUS_SUCCESS; -fail: - /* Detach will take care of freeing only allocated resources */ - dp_tx_soc_detach(soc); +fail3: + dp_tx_ext_desc_pool_free(soc, num_pool); +fail2: + dp_tx_delete_static_pools(soc, num_pool); +fail1: return QDF_STATUS_E_RESOURCES; } + +/** + * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools + * @soc: core txrx main context + * + * This function initializes the following TX descriptor pools + * 1. regular sw tx descriptor pools (static pools) + * 2. TX extension descriptor pools (ME, RAW, TSO etc...) + * 3. TSO descriptor pools + * + * Return: QDF_STATUS_SUCCESS: success + * QDF_STATUS_E_RESOURCES: Error return + */ +QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc) +{ + uint8_t num_pool; + uint32_t num_desc; + uint32_t num_ext_desc; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); + + if (dp_tx_init_static_pools(soc, num_pool, num_desc)) + goto fail1; + + if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc)) + goto fail2; + + if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx)) + return QDF_STATUS_SUCCESS; + + if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc)) + goto fail3; + + dp_tx_flow_control_init(soc); + soc->process_tx_status = CONFIG_PROCESS_TX_STATUS; + return QDF_STATUS_SUCCESS; + +fail3: + dp_tx_ext_desc_pool_deinit(soc, num_pool); +fail2: + dp_tx_deinit_static_pools(soc, num_pool); +fail1: + return QDF_STATUS_E_RESOURCES; +} + 
+/** + * dp_tso_soc_attach() - Allocate and initialize TSO descriptors + * @txrx_soc: dp soc handle + * + * Return: QDF_STATUS - QDF_STATUS_SUCCESS + * QDF_STATUS_E_FAILURE + */ +QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + uint8_t num_pool; + uint32_t num_desc; + uint32_t num_ext_desc; + + num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx); + num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx); + + if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc)) + return QDF_STATUS_E_FAILURE; + + if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc)) + return QDF_STATUS_E_FAILURE; + + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tso_soc_detach() - de-initialize and free the TSO descriptors + * @txrx_soc: dp soc handle + * + * Return: QDF_STATUS - QDF_STATUS_SUCCESS + */ +QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc) +{ + struct dp_soc *soc = (struct dp_soc *)txrx_soc; + uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx); + + dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool); + dp_tx_tso_cmn_desc_pool_free(soc, num_pool); + + return QDF_STATUS_SUCCESS; +} + diff --git a/dp/wifi3.0/dp_tx.h b/dp/wifi3.0/dp_tx.h index 5caaca1f17..1de211e203 100644 --- a/dp/wifi3.0/dp_tx.h +++ b/dp/wifi3.0/dp_tx.h @@ -171,9 +171,29 @@ void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index); QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev); QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev); void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev); +void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool); +void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool); +QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc, + uint8_t num_pool, + uint16_t num_desc); +QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc, + uint8_t num_pool, + uint16_t num_desc); 
+QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev); +QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev); -QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc); -QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc); +void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool); +void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool); +void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc); +void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc); +QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc, + uint8_t num_pool, + uint16_t num_desc); +QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc, + uint8_t num_pool, + uint16_t num_desc); +QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc); +QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc); /** * dp_tso_attach() - TSO Attach handler @@ -197,8 +217,7 @@ QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc); */ QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc); -QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev); -QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev); +QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev); qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf); diff --git a/dp/wifi3.0/dp_tx_desc.c b/dp/wifi3.0/dp_tx_desc.c index dc3e34eb60..16ce348e08 100644 --- a/dp/wifi3.0/dp_tx_desc.c +++ b/dp/wifi3.0/dp_tx_desc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -65,7 +65,7 @@ dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool, /** * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s) * @soc Handle to DP SoC structure - * @num_pool Number of pools to allocate + * @pool_id pool to allocate * @num_elem Number of descriptor elements per pool * * This function allocates memory for SW tx descriptors @@ -91,42 +91,77 @@ dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool, * Return: Status code. 0 for success. */ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem) + uint16_t num_elem) { - uint32_t id, count, page_id, offset, pool_id_32; - uint16_t num_desc_per_page; - struct dp_tx_desc_s *tx_desc_elem; uint32_t desc_size; - struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]); + struct dp_tx_desc_pool_s *tx_desc_pool; + + desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s)); + tx_desc_pool = &((soc)->tx_desc[(pool_id)]); + qdf_mem_multi_pages_alloc(soc->osdev, + &tx_desc_pool->desc_pages, + desc_size, num_elem, + 0, true); - desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem)); - tx_desc_pool->elem_size = desc_size; - if (!dp_is_soc_reinit(soc)) - qdf_mem_multi_pages_alloc(soc->osdev, - &tx_desc_pool->desc_pages, - desc_size, num_elem, - 0, true); if (!tx_desc_pool->desc_pages.num_pages) { dp_err("Multi page alloc fail, tx desc"); - goto fail_exit; + return QDF_STATUS_E_NOMEM; } + return QDF_STATUS_SUCCESS; +} +/** + * dp_tx_desc_pool_free() - Free the tx dexcriptor pools + * @soc: Handle to DP SoC structure + * @pool_id: pool to free + * + */ +void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +{ + struct dp_tx_desc_pool_s *tx_desc_pool; - num_desc_per_page = - tx_desc_pool->desc_pages.num_element_per_page; - tx_desc_pool->freelist = (struct dp_tx_desc_s *) - 
*tx_desc_pool->desc_pages.cacheable_pages; + tx_desc_pool = &((soc)->tx_desc[pool_id]); + + if (tx_desc_pool->desc_pages.num_pages) + qdf_mem_multi_pages_free(soc->osdev, + &tx_desc_pool->desc_pages, 0, true); +} + +/** + * dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s) + * @soc: Handle to DP SoC structure + * @pool_id: pool to allocate + * @num_elem: Number of descriptor elements per pool + * + * Return: QDF_STATUS_SUCCESS + * QDF_STATUS_E_FAULT + */ +QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem) +{ + uint32_t id, count, page_id, offset, pool_id_32; + struct dp_tx_desc_pool_s *tx_desc_pool; + struct dp_tx_desc_s *tx_desc_elem; + uint16_t num_desc_per_page; + uint32_t desc_size; + + desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem)); + + tx_desc_pool = &((soc)->tx_desc[(pool_id)]); if (qdf_mem_multi_page_link(soc->osdev, &tx_desc_pool->desc_pages, desc_size, num_elem, true)) { - dp_err("invalid tx desc allocation - overflow num link"); - goto free_tx_desc; + dp_err("invalid tx desc allocation -overflow num link"); + return QDF_STATUS_E_FAULT; } + tx_desc_pool->freelist = (struct dp_tx_desc_s *) + *tx_desc_pool->desc_pages.cacheable_pages; /* Set unique IDs for each Tx descriptor */ tx_desc_elem = tx_desc_pool->freelist; count = 0; pool_id_32 = (uint32_t)pool_id; + num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page; while (tx_desc_elem) { page_id = count / num_desc_per_page; offset = count % num_desc_per_page; @@ -139,365 +174,532 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, count++; } + tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem)); + dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem); TX_DESC_LOCK_CREATE(&tx_desc_pool->lock); + return QDF_STATUS_SUCCESS; - -free_tx_desc: - qdf_mem_multi_pages_free(soc->osdev, - &tx_desc_pool->desc_pages, 0, true); - -fail_exit: - return QDF_STATUS_E_FAULT; } /** - * dp_tx_desc_pool_free() - Free the memory 
pool allocated for Tx Descriptors - * + * dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s) * @soc Handle to DP SoC structure - * @pool_id + * @pool_id: pool to de-initialize * - * Return: */ -QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id) { - struct dp_tx_desc_pool_s *tx_desc_pool = - &((soc)->tx_desc[(pool_id)]); + struct dp_tx_desc_pool_s *tx_desc_pool; - qdf_mem_multi_pages_free(soc->osdev, - &tx_desc_pool->desc_pages, 0, true); - TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock); + tx_desc_pool = &((soc)->tx_desc[(pool_id)]); TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool); - return QDF_STATUS_SUCCESS; + TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock); } /** - * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool - * @soc Handle to DP SoC structure - * @pool_id + * dp_tx_ext_desc_pool_alloc() - allocate Tx extenstion Descriptor pool(s) + * @soc: Handle to DP SoC structure + * @num_pool: Number of pools to allocate + * @num_elem: Number of descriptor elements per pool * - * Return: NONE + * Return - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM */ -QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem) +QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) { - uint16_t num_page; - uint32_t count; - struct dp_tx_ext_desc_elem_s *c_elem, *p_elem; - struct qdf_mem_dma_page_t *page_info; - struct qdf_mem_multi_page_t *pages; - QDF_STATUS status; + QDF_STATUS status = QDF_STATUS_SUCCESS; qdf_dma_context_t memctx = 0; + uint8_t pool_id, count; + uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA; + struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool; + uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s); /* Coherent tx extension descriptor alloc */ - soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA; - soc->tx_ext_desc[pool_id].elem_count = num_elem; - memctx = 
qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx); - if (!dp_is_soc_reinit(soc)) { + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); + memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx); qdf_mem_multi_pages_alloc(soc->osdev, - &soc->tx_ext_desc[pool_id]. - desc_pages, - soc->tx_ext_desc[pool_id].elem_size, - soc->tx_ext_desc[pool_id].elem_count, + &dp_tx_ext_desc_pool->desc_pages, + elem_size, + num_elem, memctx, false); - } - if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "ext desc page alloc fail"); - status = QDF_STATUS_E_NOMEM; - goto fail_exit; + + if (!dp_tx_ext_desc_pool->desc_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext desc page alloc fail"); + status = QDF_STATUS_E_NOMEM; + goto fail_exit; + } } - num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages; /* * Cacheable ext descriptor link alloc * This structure also large size already * single element is 24bytes, 2K elements are 48Kbytes * Have to alloc multi page cacheable memory */ - soc->tx_ext_desc[pool_id].link_elem_size = - sizeof(struct dp_tx_ext_desc_elem_s); - if (!dp_is_soc_reinit(soc)) { + for (pool_id = 0; pool_id < num_pool; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); qdf_mem_multi_pages_alloc(soc->osdev, - &soc->tx_ext_desc[pool_id]. - desc_link_pages, - soc->tx_ext_desc[pool_id]. + &dp_tx_ext_desc_pool->desc_link_pages, link_elem_size, - soc->tx_ext_desc[pool_id]. 
- elem_count, + num_elem, 0, true); + + if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext link desc page alloc fail"); + status = QDF_STATUS_E_NOMEM; + goto free_ext_desc_page; + } } - if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "ext link desc page alloc fail"); - status = QDF_STATUS_E_NOMEM; - goto free_ext_desc_page; + return status; + +free_ext_desc_page: + for (count = 0; count < pool_id; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); + qdf_mem_multi_pages_free(soc->osdev, + &dp_tx_ext_desc_pool->desc_link_pages, + 0, true); } + pool_id = num_pool; + +fail_exit: + for (count = 0; count < pool_id; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); + memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx); + qdf_mem_multi_pages_free(soc->osdev, + &dp_tx_ext_desc_pool->desc_pages, + memctx, false); + } + return status; +} + +/** + * dp_tx_ext_desc_pool_init() - initialize Tx extenstion Descriptor pool(s) + * @soc: Handle to DP SoC structure + * @num_pool: Number of pools to initialize + * @num_elem: Number of descriptor elements per pool + * + * Return - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ +QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) +{ + uint32_t i; + struct dp_tx_ext_desc_elem_s *c_elem, *p_elem; + struct qdf_mem_dma_page_t *page_info; + struct qdf_mem_multi_page_t *pages; + struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool; + uint8_t pool_id; + QDF_STATUS status; /* link tx descriptors into a freelist */ - soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *) - *soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages; - if (qdf_mem_multi_page_link(soc->osdev, - &soc->tx_ext_desc[pool_id].desc_link_pages, - soc->tx_ext_desc[pool_id].link_elem_size, - soc->tx_ext_desc[pool_id].elem_count, true)) { - 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "ext link desc page linking fail"); - status = QDF_STATUS_E_FAULT; - goto free_ext_link_desc_page; - } + for (pool_id = 0; pool_id < num_pool; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); + soc->tx_ext_desc[pool_id].elem_size = + HAL_TX_EXT_DESC_WITH_META_DATA; + soc->tx_ext_desc[pool_id].link_elem_size = + sizeof(struct dp_tx_ext_desc_elem_s); + soc->tx_ext_desc[pool_id].elem_count = num_elem; - /* Assign coherent memory pointer into linked free list */ - pages = &soc->tx_ext_desc[pool_id].desc_pages; - page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages; - c_elem = soc->tx_ext_desc[pool_id].freelist; - p_elem = c_elem; - for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) { - if (!(count % pages->num_element_per_page)) { + dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *) + *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages; + + if (qdf_mem_multi_page_link(soc->osdev, + &dp_tx_ext_desc_pool-> + desc_link_pages, + dp_tx_ext_desc_pool->link_elem_size, + dp_tx_ext_desc_pool->elem_count, + true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "ext link desc page linking fail"); + status = QDF_STATUS_E_FAULT; + goto fail; + } + + /* Assign coherent memory pointer into linked free list */ + pages = &dp_tx_ext_desc_pool->desc_pages; + page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages; + c_elem = dp_tx_ext_desc_pool->freelist; + p_elem = c_elem; + for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) { + if (!(i % pages->num_element_per_page)) { /** * First element for new page, * should point next page */ - if (!pages->dma_pages->page_v_addr_start) { - QDF_TRACE(QDF_MODULE_ID_DP, - QDF_TRACE_LEVEL_ERROR, - "link over flow"); - status = QDF_STATUS_E_FAULT; - goto free_ext_link_desc_page; + if (!pages->dma_pages->page_v_addr_start) { + QDF_TRACE(QDF_MODULE_ID_DP, + QDF_TRACE_LEVEL_ERROR, + "link over flow"); + status = QDF_STATUS_E_FAULT; 
+ goto fail; + } + + c_elem->vaddr = + (void *)page_info->page_v_addr_start; + c_elem->paddr = page_info->page_p_addr; + page_info++; + } else { + c_elem->vaddr = (void *)(p_elem->vaddr + + dp_tx_ext_desc_pool->elem_size); + c_elem->paddr = (p_elem->paddr + + dp_tx_ext_desc_pool->elem_size); } - c_elem->vaddr = (void *)page_info->page_v_addr_start; - c_elem->paddr = page_info->page_p_addr; - page_info++; - } else { - c_elem->vaddr = (void *)(p_elem->vaddr + - soc->tx_ext_desc[pool_id].elem_size); - c_elem->paddr = (p_elem->paddr + - soc->tx_ext_desc[pool_id].elem_size); + p_elem = c_elem; + c_elem = c_elem->next; + if (!c_elem) + break; } - p_elem = c_elem; - c_elem = c_elem->next; - if (!c_elem) - break; + dp_tx_ext_desc_pool->num_free = num_elem; + qdf_spinlock_create(&dp_tx_ext_desc_pool->lock); } - - soc->tx_ext_desc[pool_id].num_free = num_elem; - qdf_spinlock_create(&soc->tx_ext_desc[pool_id].lock); return QDF_STATUS_SUCCESS; -free_ext_link_desc_page: - qdf_mem_multi_pages_free(soc->osdev, - &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true); - -free_ext_desc_page: - qdf_mem_multi_pages_free(soc->osdev, - &soc->tx_ext_desc[pool_id].desc_pages, - qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), - false); - -fail_exit: +fail: return status; - } /** - * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool + * dp_tx_ext_desc_pool_free() - free Tx extenstion Descriptor pool(s) * @soc: Handle to DP SoC structure - * @pool_id: extension descriptor pool id + * @num_pool: Number of pools to free * - * Return: NONE */ -QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool) { - qdf_mem_multi_pages_free(soc->osdev, - &soc->tx_ext_desc[pool_id].desc_link_pages, 0, true); + uint8_t pool_id; + struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool; + qdf_dma_context_t memctx = 0; - qdf_mem_multi_pages_free(soc->osdev, - &soc->tx_ext_desc[pool_id].desc_pages, - 
qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx), - false); + for (pool_id = 0; pool_id < num_pool; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); + memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx); - qdf_spinlock_destroy(&soc->tx_ext_desc[pool_id].lock); - return QDF_STATUS_SUCCESS; + qdf_mem_multi_pages_free(soc->osdev, + &dp_tx_ext_desc_pool->desc_link_pages, + 0, true); + + qdf_mem_multi_pages_free(soc->osdev, + &dp_tx_ext_desc_pool->desc_pages, + memctx, false); + } } /** - * dp_tx_tso_desc_pool_alloc() - allocate tx tso descriptor pool + * dp_tx_ext_desc_pool_deinit() - deinit Tx extenstion Descriptor pool(s) * @soc: Handle to DP SoC structure - * @pool_id: tso descriptor pool id - * @num_elem: number of element + * @num_pool: Number of pools to de-initialize * - * Return: QDF_STATUS_SUCCESS */ +void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool) +{ + uint8_t pool_id; + struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool; + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]); + qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock); + } +} + #if defined(FEATURE_TSO) -QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem) +/** + * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s) + * @soc: Handle to DP SoC structure + * @num_pool: Number of pools to allocate + * @num_elem: Number of descriptor elements per pool + * + * Return - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) { struct dp_tx_tso_seg_pool_s *tso_desc_pool; - uint32_t desc_size; + uint32_t desc_size, pool_id, i; - tso_desc_pool = &soc->tx_tso_desc[pool_id]; - tso_desc_pool->num_free = 0; desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t)); - if (!dp_is_soc_reinit(soc)) + for (pool_id = 0; pool_id < num_pool; pool_id++) { + 
tso_desc_pool = &soc->tx_tso_desc[pool_id]; + tso_desc_pool->num_free = 0; qdf_mem_multi_pages_alloc(soc->osdev, &tso_desc_pool->desc_pages, desc_size, num_elem, 0, true); - if (!tso_desc_pool->desc_pages.num_pages) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - FL("Alloc Failed %pK pool_id %d"), - soc, pool_id); - return QDF_STATUS_E_NOMEM; + if (!tso_desc_pool->desc_pages.num_pages) { + dp_err("Multi page alloc fail, tx desc"); + goto fail; + } } - - tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *) - *tso_desc_pool->desc_pages.cacheable_pages; - tso_desc_pool->num_free = num_elem; - if (qdf_mem_multi_page_link(soc->osdev, - &tso_desc_pool->desc_pages, - desc_size, - num_elem, true)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "invalid tso desc allocation - overflow num link"); - goto free_tso_desc; - } - TSO_DEBUG("Number of free descriptors: %u\n", tso_desc_pool->num_free); - tso_desc_pool->pool_size = num_elem; - qdf_spinlock_create(&tso_desc_pool->lock); - return QDF_STATUS_SUCCESS; -free_tso_desc: - qdf_mem_multi_pages_free(soc->osdev, - &tso_desc_pool->desc_pages, 0, true); - - return QDF_STATUS_E_FAULT; +fail: + for (i = 0; i < pool_id; i++) { + tso_desc_pool = &soc->tx_tso_desc[i]; + qdf_mem_multi_pages_free(soc->osdev, + &tso_desc_pool->desc_pages, + 0, true); + } + return QDF_STATUS_E_NOMEM; } /** - * dp_tx_tso_desc_pool_free() - free tx tso descriptor pool + * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s) * @soc: Handle to DP SoC structure - * @pool_id: extension descriptor pool id + * @num_pool: Number of pools to free * - * Return: NONE */ -void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool) { struct dp_tx_tso_seg_pool_s *tso_desc_pool; + uint32_t pool_id; - tso_desc_pool = &soc->tx_tso_desc[pool_id]; - - qdf_spin_lock_bh(&tso_desc_pool->lock); - - qdf_mem_multi_pages_free(soc->osdev, - &tso_desc_pool->desc_pages, 0, true); - 
tso_desc_pool->freelist = NULL; - tso_desc_pool->num_free = 0; - tso_desc_pool->pool_size = 0; - qdf_spin_unlock_bh(&tso_desc_pool->lock); - qdf_spinlock_destroy(&tso_desc_pool->lock); - return; + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_desc_pool = &soc->tx_tso_desc[pool_id]; + qdf_mem_multi_pages_free(soc->osdev, + &tso_desc_pool->desc_pages, 0, true); + } } + +/** + * dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s) + * @soc: Handle to DP SoC structure + * @num_pool: Number of pools to initialize + * @num_elem: Number of descriptor elements per pool + * + * Return - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ +QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) +{ + struct dp_tx_tso_seg_pool_s *tso_desc_pool; + uint32_t desc_size, pool_id; + + desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t)); + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_desc_pool = &soc->tx_tso_desc[pool_id]; + + if (qdf_mem_multi_page_link(soc->osdev, + &tso_desc_pool->desc_pages, + desc_size, + num_elem, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "invalid tso desc allocation - overflow num link"); + return QDF_STATUS_E_FAULT; + } + + tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *) + *tso_desc_pool->desc_pages.cacheable_pages; + tso_desc_pool->num_free = num_elem; + + TSO_DEBUG("Number of free descriptors: %u\n", + tso_desc_pool->num_free); + tso_desc_pool->pool_size = num_elem; + qdf_spinlock_create(&tso_desc_pool->lock); + } + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s) + * @soc: Handle to DP SoC structure + * @num_pool: Number of pools to free + * + */ +void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool) +{ + struct dp_tx_tso_seg_pool_s *tso_desc_pool; + uint32_t pool_id; + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_desc_pool = &soc->tx_tso_desc[pool_id]; + 
qdf_spin_lock_bh(&tso_desc_pool->lock); + + tso_desc_pool->freelist = NULL; + tso_desc_pool->num_free = 0; + tso_desc_pool->pool_size = 0; + qdf_spin_unlock_bh(&tso_desc_pool->lock); + qdf_spinlock_destroy(&tso_desc_pool->lock); + } +} + /** * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that tracks the * fragments in each tso segment * * @soc: handle to dp soc structure - * @pool_id: descriptor pool id + * @num_pool: number of pools to allocate * @num_elem: total number of descriptors to be allocated + * + * Return - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM */ -QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem) +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) { struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool; - uint32_t desc_size; + uint32_t desc_size, pool_id, i; - tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; - tso_num_seg_pool->num_free = 0; desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t)); - if (!dp_is_soc_reinit(soc)) + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; + tso_num_seg_pool->num_free = 0; qdf_mem_multi_pages_alloc(soc->osdev, &tso_num_seg_pool->desc_pages, desc_size, num_elem, 0, true); - if (!tso_num_seg_pool->desc_pages.num_pages) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - FL("Alloc Failed %pK pool_id %d"), - soc, pool_id); - return QDF_STATUS_E_NOMEM; + + if (!tso_num_seg_pool->desc_pages.num_pages) { + dp_err("Multi page alloc fail, tso_num_seg_pool"); + goto fail; + } } - - if (qdf_mem_multi_page_link(soc->osdev, - &tso_num_seg_pool->desc_pages, - desc_size, - num_elem, true)) { - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "invalid tso desc allocation - overflow num link"); - goto fail; - } - - tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *) - *tso_num_seg_pool->desc_pages.cacheable_pages; - 
tso_num_seg_pool->num_free = num_elem; - tso_num_seg_pool->num_seg_pool_size = num_elem; - - qdf_spinlock_create(&tso_num_seg_pool->lock); - return QDF_STATUS_SUCCESS; fail: - qdf_mem_multi_pages_free(soc->osdev, - &tso_num_seg_pool->desc_pages, 0, true); - + for (i = 0; i < pool_id; i++) { + tso_num_seg_pool = &soc->tx_tso_num_seg[i]; + qdf_mem_multi_pages_free(soc->osdev, + &tso_num_seg_pool->desc_pages, + 0, true); + } return QDF_STATUS_E_NOMEM; } /** - * dp_tx_tso_num_seg_pool_free() - free pool of descriptors that tracks - * the fragments in tso segment - * + * dp_tx_tso_num_seg_pool_free() - free descriptors that tracks the + * fragments in each tso segment * * @soc: handle to dp soc structure - * @pool_id: descriptor pool_id + * @num_pool: number of pools to free */ -void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id) +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool) { struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool; + uint32_t pool_id; - tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; - qdf_spin_lock_bh(&tso_num_seg_pool->lock); - - qdf_mem_multi_pages_free(soc->osdev, - &tso_num_seg_pool->desc_pages, 0, true); - tso_num_seg_pool->freelist = NULL; - tso_num_seg_pool->num_free = 0; - tso_num_seg_pool->num_seg_pool_size = 0; - qdf_spin_unlock_bh(&tso_num_seg_pool->lock); - qdf_spinlock_destroy(&tso_num_seg_pool->lock); - return; + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; + qdf_mem_multi_pages_free(soc->osdev, + &tso_num_seg_pool->desc_pages, + 0, true); + } } +/** + * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that tracks the + * fragments in each tso segment + * + * @soc: handle to dp soc structure + * @num_pool: number of pools to initialize + * @num_elem: total number of descriptors to be initialized + * + * Return - QDF_STATUS_SUCCESS + * QDF_STATUS_E_FAULT + */ +QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool, 
+ uint16_t num_elem) +{ + struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool; + uint32_t desc_size, pool_id; + + desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t)); + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; + if (qdf_mem_multi_page_link(soc->osdev, + &tso_num_seg_pool->desc_pages, + desc_size, + num_elem, true)) { + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "invalid tso desc allocation - overflow num link"); + return QDF_STATUS_E_FAULT; + } + + tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *) + *tso_num_seg_pool->desc_pages.cacheable_pages; + tso_num_seg_pool->num_free = num_elem; + tso_num_seg_pool->num_seg_pool_size = num_elem; + + qdf_spinlock_create(&tso_num_seg_pool->lock); + } + return QDF_STATUS_SUCCESS; +} + +/** + * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that tracks the + * fragments in each tso segment + * + * @soc: handle to dp soc structure + * @num_pool: number of pools to de-initialize + * + * Return - None + */ +void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool) +{ + struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool; + uint32_t pool_id; + + for (pool_id = 0; pool_id < num_pool; pool_id++) { + tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id]; + qdf_spin_lock_bh(&tso_num_seg_pool->lock); + + tso_num_seg_pool->freelist = NULL; + tso_num_seg_pool->num_free = 0; + tso_num_seg_pool->num_seg_pool_size = 0; + qdf_spin_unlock_bh(&tso_num_seg_pool->lock); + qdf_spinlock_destroy(&tso_num_seg_pool->lock); + } +} #else -QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem) +QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) { return QDF_STATUS_SUCCESS; } -void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id) -{ - return; -} - -QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc 
*soc, uint8_t pool_id, - uint16_t num_elem) +QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) { return QDF_STATUS_SUCCESS; } -void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id) +void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool) +{ +} + +void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool) +{ +} + +QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool) +{ +} + +QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool, + uint16_t num_elem) +{ + return QDF_STATUS_SUCCESS; +} + +void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool) { - return; } #endif diff --git a/dp/wifi3.0/dp_tx_desc.h b/dp/wifi3.0/dp_tx_desc.h index 7403d9e860..1832fab3b1 100644 --- a/dp/wifi3.0/dp_tx_desc.h +++ b/dp/wifi3.0/dp_tx_desc.h @@ -78,17 +78,32 @@ do { \ #define MAX_POOL_BUFF_COUNT 10000 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem); -QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); + uint16_t num_elem); +QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id); + QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t num_elem); -QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); + uint16_t num_elem); +QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); +void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id); + QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, - uint16_t 
num_elem); + uint16_t num_elem); +QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id); +void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id); + QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id, uint16_t num_elem); +QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id, + uint16_t num_elem); void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id); +void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id); #ifdef QCA_LL_TX_FLOW_CONTROL_V2 void dp_tx_flow_control_init(struct dp_soc *); diff --git a/dp/wifi3.0/dp_tx_flow_control.c b/dp/wifi3.0/dp_tx_flow_control.c index 4aecd81352..d11c8ce0bb 100644 --- a/dp/wifi3.0/dp_tx_flow_control.c +++ b/dp/wifi3.0/dp_tx_flow_control.c @@ -274,6 +274,12 @@ struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc, return NULL; } + if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) { + dp_tx_desc_pool_free(soc, flow_pool_id); + qdf_spin_unlock_bh(&pool->flow_pool_lock); + return NULL; + } + stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx); start_threshold = stop_threshold + wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx); @@ -338,6 +344,7 @@ int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool, } /* We have all the descriptors for the pool, we can delete the pool */ + dp_tx_desc_pool_deinit(soc, pool->flow_pool_id); dp_tx_desc_pool_free(soc, pool->flow_pool_id); qdf_spin_unlock_bh(&pool->flow_pool_lock); return 0; @@ -529,8 +536,8 @@ static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc) if (!tx_desc_pool->desc_pages.num_pages) continue; - if (dp_tx_desc_pool_free(soc, i) != QDF_STATUS_SUCCESS) - dp_err("Tx Desc Pool:%d Free failed", i); + dp_tx_desc_pool_deinit(soc, i); + dp_tx_desc_pool_free(soc, i); } } diff --git a/dp/wifi3.0/dp_types.h 
b/dp/wifi3.0/dp_types.h index e4903f8232..acd9dfb8b9 100644 --- a/dp/wifi3.0/dp_types.h +++ b/dp/wifi3.0/dp_types.h @@ -1123,15 +1123,6 @@ struct dp_soc { struct dp_txrx_pool_stats pool_stats; #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */ - /* - * Re-use memory section ends. reuse memory indicator. - * Everything above this variable "dp_soc_reinit" is retained across - * WiFi up/down for AP use-cases. - * Everything below this variable "dp_soc_reinit" is reset during - * dp_soc_deinit. - */ - bool dp_soc_reinit; - uint32_t wbm_idle_scatter_buf_size; /* VDEVs on this SOC */ @@ -1328,11 +1319,11 @@ struct dp_soc { uint8_t fisa_enable; #endif #endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */ - /* Full monitor mode support */ bool full_mon_mode; /* SG supported for msdu continued packets from wbm release ring */ bool wbm_release_desc_rx_sg_support; + bool peer_map_attach_success; }; #ifdef IPA_OFFLOAD @@ -1547,12 +1538,6 @@ struct dp_pdev { /* Stuck count on monitor destination ring MPDU process */ uint32_t mon_dest_ring_stuck_cnt; - /* - * re-use memory section ends - * reuse memory/deinit indicator - * - * DO NOT CHANGE NAME OR MOVE THIS VARIABLE - */ bool pdev_deinit; /* pdev status down or up required to handle dynamic hw diff --git a/wlan_cfg/wlan_cfg.c b/wlan_cfg/wlan_cfg.c index d86b26b193..765a8f4f6f 100644 --- a/wlan_cfg/wlan_cfg.c +++ b/wlan_cfg/wlan_cfg.c @@ -635,6 +635,18 @@ wlan_cfg_pdev_attach(struct cdp_ctrl_objmgr_psoc *psoc) return wlan_cfg_ctx; } +void wlan_cfg_set_mon_delayed_replenish_entries( + struct wlan_cfg_dp_soc_ctxt *cfg, + uint32_t val) +{ + cfg->delayed_replenish_entries = val; +} + +int wlan_cfg_get_mon_delayed_replenish_entries(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->delayed_replenish_entries; +} + void wlan_cfg_pdev_detach(struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx) { if (wlan_cfg_ctx) @@ -1226,6 +1238,17 @@ wlan_cfg_get_dp_caps(struct wlan_cfg_dp_soc_ctxt *cfg, } } +void wlan_cfg_set_tso_desc_attach_defer(struct 
wlan_cfg_dp_soc_ctxt *cfg, + bool val) +{ + cfg->is_tso_desc_attach_defer = val; +} + +bool wlan_cfg_is_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg) +{ + return cfg->is_tso_desc_attach_defer; +} + #ifdef QCA_LL_TX_FLOW_CONTROL_V2 /** * wlan_cfg_get_tx_flow_stop_queue_th() - Get flow control stop threshold diff --git a/wlan_cfg/wlan_cfg.h b/wlan_cfg/wlan_cfg.h index a8c8e512b1..cb8e774e37 100644 --- a/wlan_cfg/wlan_cfg.h +++ b/wlan_cfg/wlan_cfg.h @@ -280,6 +280,8 @@ struct wlan_cfg_dp_soc_ctxt { uint8_t *rx_toeplitz_hash_key; uint8_t pktlog_buffer_size; uint8_t is_rx_fisa_enabled; + bool is_tso_desc_attach_defer; + uint32_t delayed_replenish_entries; }; /** @@ -365,6 +367,28 @@ int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context, int mask); int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int context, int mask); + +/** + * wlan_cfg_set_mon_delayed_replenish_entries() - number of buffers to replenish + * for monitor buffer ring at initialization + * @wlan_cfg_ctx - Configuration Handle + * @replenish_entries - number of entries to replenish at initialization + * + */ +void wlan_cfg_set_mon_delayed_replenish_entries(struct wlan_cfg_dp_soc_ctxt + *wlan_cfg_ctx, + uint32_t replenish_entries); + +/** + * wlan_cfg_get_mon_delayed_replenish_entries() - get num of buffer to replenish + * for monitor buffer ring at initialization + * @wlan_cfg_ctx - Configuration Handle + * @replenish_entries - number of entries to replenish at initialization + * + * Return: delayed_replenish_entries; + */ +int wlan_cfg_get_mon_delayed_replenish_entries(struct wlan_cfg_dp_soc_ctxt + *wlan_cfg_ctx); /** * wlan_cfg_get_num_contexts() - Number of interrupt contexts to be registered * @wlan_cfg_ctx - Configuration Handle @@ -1290,3 +1314,9 @@ void wlan_cfg_fill_interrupt_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx, */ bool wlan_cfg_is_rx_fisa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg); #endif + +void 
wlan_cfg_set_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg, + bool val); + +bool wlan_cfg_is_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg); +