qcacmn: Reorganise DP init-deinit path to reuse memory

Avoid the memory fragmentation that occurs during the
attach-detach flow:
- Reuse the statically allocated Tx descriptor pool memory across
  soc up/down. This memory is allocated during soc attach.
- Reuse DP SRNG (ring) memory, the DP soc context and the DP pdev
  context across soc up/down.
- Reorganise the structure members of DP soc and DP pdev so that
  the per-init members can be zeroed out across soc up/down.
- Add cdp soc init/deinit and cdp pdev init/deinit operations that
  are exercised across soc up/down.

Change-Id: I5732453f617bdc16995fda916b645c41845c3ecb
Author: Anish Nataraj
Date: 2018-11-24 22:24:56 +05:30
Committed by: nshrivas
Parent: 5eb6276a46
Commit: e9d4c3bf33
12 changed files with 1096 additions and 302 deletions
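
For orientation before the per-file hunks, here is a minimal sketch of the soc
up/down lifecycle this change introduces at the CDP layer. It is illustrative
only: the parameter order of cdp_soc_attach() is assumed to mirror
cdp_soc_init() minus the soc handle, and the pdev attach/init/deinit steps are
elided.

/* Illustrative sketch of a soc up/down/up cycle using the cdp wrappers
 * added in the first hunk below. Attach-time memory (Tx descriptor pools,
 * SRNG memory, soc/pdev contexts) is allocated once and reused on the
 * second init.
 */
static void example_soc_lifecycle(u_int16_t devid, void *hif_handle,
                                  void *psoc, void *htc_handle,
                                  qdf_device_t qdf_dev,
                                  struct ol_if_ops *dp_ol_if_ops)
{
        ol_txrx_soc_handle soc;

        /* One-time allocation; survives soc up/down cycles */
        soc = cdp_soc_attach(devid, hif_handle, psoc, htc_handle,
                             qdf_dev, dp_ol_if_ops);
        if (!soc)
                return;

        /* First "up": hardware-dependent initialisation */
        if (!cdp_soc_init(soc, devid, hif_handle, psoc, htc_handle,
                          qdf_dev, dp_ol_if_ops))
                goto detach;

        /* ... cdp_pdev_attach() / traffic / cdp_pdev_deinit() ... */

        /* "Down": release hardware state, keep attach-time memory */
        cdp_soc_deinit(soc);

        /* Second "up": attach-time memory is reused, not reallocated */
        if (!cdp_soc_init(soc, devid, hif_handle, psoc, htc_handle,
                          qdf_dev, dp_ol_if_ops))
                goto detach;

        cdp_soc_deinit(soc);

detach:
        /* Final teardown frees the attach-time memory as well */
        cdp_soc_detach(soc);
}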

View File

@@ -273,6 +273,23 @@ cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force);
}
static inline void
cdp_pdev_deinit(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
"%s: Invalid Instance:", __func__);
QDF_BUG(0);
return;
}
if (!soc->ops->cmn_drv_ops ||
!soc->ops->cmn_drv_ops->txrx_pdev_deinit)
return;
soc->ops->cmn_drv_ops->txrx_pdev_deinit(pdev, force);
}
static inline void *cdp_peer_create
(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
@@ -1161,6 +1178,115 @@ cdp_soc_detach(ol_txrx_soc_handle soc)
soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc);
}
/**
* cdp_soc_init() - Initialize txrx SOC
* @soc: ol_txrx_soc_handle handle
* @devid: Device ID
* @hif_handle: Opaque HIF handle
* @psoc: Opaque Objmgr handle
* @htc_handle: Opaque HTC handle
* @qdf_dev: QDF device
* @dp_ol_if_ops: Offload Operations
*
* Return: DP SOC handle on success, NULL on failure
*/
static inline ol_txrx_soc_handle
cdp_soc_init(ol_txrx_soc_handle soc, u_int16_t devid, void *hif_handle,
void *psoc, void *htc_handle, qdf_device_t qdf_dev,
struct ol_if_ops *dp_ol_if_ops)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
"%s: Invalid Instance:", __func__);
QDF_BUG(0);
return NULL;
}
if (!soc->ops->cmn_drv_ops ||
!soc->ops->cmn_drv_ops->txrx_soc_init)
return NULL;
return soc->ops->cmn_drv_ops->txrx_soc_init(soc, psoc,
hif_handle,
htc_handle, qdf_dev,
dp_ol_if_ops, devid);
}
/**
* cdp_soc_deinit() - Deinitialize txrx SOC
* @soc: Opaque DP SOC handle
*
* Return: None
*/
static inline void
cdp_soc_deinit(ol_txrx_soc_handle soc)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
"%s: Invalid Instance:", __func__);
QDF_BUG(0);
return;
}
if (!soc->ops->cmn_drv_ops ||
!soc->ops->cmn_drv_ops->txrx_soc_deinit)
return;
soc->ops->cmn_drv_ops->txrx_soc_deinit((void *)soc);
}
/**
* cdp_tso_soc_attach() - TSO attach function
* @soc: ol_txrx_soc_handle handle
*
* Reserve TSO descriptor buffers
*
* Return: QDF_STATUS_SUCCESS on Success or
* QDF_STATUS_E_FAILURE on failure
*/
static inline QDF_STATUS
cdp_tso_soc_attach(ol_txrx_soc_handle soc)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
"%s: Invalid Instance:", __func__);
QDF_BUG(0);
return 0;
}
if (!soc->ops->cmn_drv_ops ||
!soc->ops->cmn_drv_ops->txrx_tso_soc_attach)
return 0;
return soc->ops->cmn_drv_ops->txrx_tso_soc_attach((void *)soc);
}
/**
* cdp_tso_soc_detach() - TSO detach function
* @soc: ol_txrx_soc_handle handle
*
* Release TSO descriptor buffers
*
* Return: QDF_STATUS_SUCCESS on Success or
* QDF_STATUS_E_FAILURE on failure
*/
static inline QDF_STATUS
cdp_tso_soc_detach(ol_txrx_soc_handle soc)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
"%s: Invalid Instance:", __func__);
QDF_BUG(0);
return 0;
}
if (!soc->ops->cmn_drv_ops ||
!soc->ops->cmn_drv_ops->txrx_tso_soc_detach)
return 0;
return soc->ops->cmn_drv_ops->txrx_tso_soc_detach((void *)soc);
}
/**
* cdp_addba_resp_tx_completion() - Indicate addba response tx
* completion to dp to change tid state.

View File

@@ -43,10 +43,26 @@ ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle, struct ol_if_ops *dp_ol_
*
* Return: DP SOC handle on success, NULL on failure
*/
/**
* dp_soc_init_wifi3() - Initialize txrx SOC
* @soc: Opaque DP SOC handle
* @ctrl_psoc: Opaque SOC handle from control plane
* @hif_handle: Opaque HIF handle
* @htc_handle: Opaque HTC handle
* @qdf_osdev: QDF device
* @ol_ops: Offload Operations
* @device_id: Device ID
*
* Return: DP SOC handle on success, NULL on failure
*/
#ifdef QCA_WIFI_QCA8074
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id);
void *dp_soc_init_wifi3(void *soc, void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id);
#else
static inline void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle,
@@ -56,6 +72,14 @@ static inline void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
{
return NULL;
}
static inline
void *dp_soc_init_wifi3(void *soc, void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id)
{
return NULL;
}
#endif /* QCA_WIFI_QCA8074 */
static inline ol_txrx_soc_handle cdp_soc_attach(u_int16_t devid,

View File

@@ -83,6 +83,15 @@ struct cdp_cmn_ops {
void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
/**
* txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
* @pdev: Dp pdev handle
* @force: Force deinit or not
*
* Return: None
*/
void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force);
void *(*txrx_peer_create)
(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
struct cdp_ctrl_objmgr_peer *ctrl_peer);
@@ -289,6 +298,43 @@ struct cdp_cmn_ops {
void (*txrx_soc_detach)(void *soc);
/**
* txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
* @soc: Opaque Dp handle
*
* Return: None
*/
void (*txrx_soc_deinit)(void *soc);
/**
* txrx_soc_init() - Initialize dp soc and dp ring memory
* @soc: Opaque Dp handle
* @htchdl: Opaque htc handle
* @hifhdl: Opaque hif handle
*
* Return: None
*/
void *(*txrx_soc_init)(void *soc, void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id);
/**
* txrx_tso_soc_attach() - TSO attach handler triggered during
* dynamic tso activation
* @soc: Opaque Dp handle
*
* Return: QDF status
*/
QDF_STATUS (*txrx_tso_soc_attach)(void *soc);
/**
* txrx_tso_soc_detach() - TSO detach handler triggered during
* dynamic tso de-activation
* @soc: Opaque Dp handle
*
* Return: QDF status
*/
QDF_STATUS (*txrx_tso_soc_detach)(void *soc);
int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
int status);

View File

@@ -3243,8 +3243,8 @@ htt_htc_soc_attach(struct htt_soc *soc)
}
/*
* htt_soc_attach() - SOC level HTT initialization
* @dp_soc: Opaque Data path SOC handle
* htt_soc_initialize() - SOC level HTT initialization
* @htt_soc: Opaque htt SOC handle
* @ctrl_psoc: Opaque ctrl SOC handle
* @htc_soc: SOC level HTC handle
* @hal_soc: Opaque HAL SOC handle
@@ -3253,57 +3253,52 @@ htt_htc_soc_attach(struct htt_soc *soc)
* Return: HTT handle on success; NULL on failure
*/
void *
htt_soc_attach(void *dp_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
htt_soc_initialize(void *htt_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
void *hal_soc, qdf_device_t osdev)
{
struct htt_soc *soc;
int i;
soc = qdf_mem_malloc(sizeof(*soc));
if (!soc)
goto fail1;
struct htt_soc *soc = (struct htt_soc *)htt_soc;
soc->osdev = osdev;
soc->ctrl_psoc = ctrl_psoc;
soc->dp_soc = dp_soc;
soc->htc_soc = htc_soc;
soc->hal_soc = hal_soc;
/* TODO: See if any NSS related context is required in htt_soc */
soc->htt_htc_pkt_freelist = NULL;
if (htt_htc_soc_attach(soc))
goto fail2;
/* TODO: See if any Rx data specific intialization is required. For
* MCL use cases, the data will be received as single packet and
* should not required any descriptor or reorder handling
return soc;
fail2:
return NULL;
}
/*
* htt_soc_htc_prealloc() - HTC memory prealloc
* @htt_soc: SOC level HTT handle
*
* Return: QDF_STATUS_SUCCESS on Success or
* QDF_STATUS_E_NOMEM on allocation failure
*/
QDF_STATUS
htt_soc_htc_prealloc(struct htt_soc *soc)
{
int i;
HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
soc->htt_htc_pkt_freelist = NULL;
/* pre-allocate some HTC_PACKET objects */
for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
struct dp_htt_htc_pkt_union *pkt;
pkt = qdf_mem_malloc(sizeof(*pkt));
if (!pkt)
break;
return QDF_STATUS_E_NOMEM;
htt_htc_pkt_free(soc, &pkt->u.pkt);
}
return soc;
fail2:
qdf_mem_free(soc);
fail1:
return NULL;
return QDF_STATUS_SUCCESS;
}
/*
* htt_soc_detach() - Detach SOC level HTT
* @htt_soc: HTT SOC handle
@@ -3312,12 +3307,16 @@ void
htt_soc_detach(void *htt_soc)
{
struct htt_soc *soc = (struct htt_soc *)htt_soc;
struct dp_soc *dpsoc = soc->dp_soc;
if (dpsoc->dp_soc_reinit) {
htt_htc_misc_pkt_pool_free(soc);
htt_htc_pkt_pool_free(soc);
} else {
HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
qdf_mem_free(soc);
}
}
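
Condensing the split this file section introduces: the htt_soc structure and
its HTC packet pool are now set up once at attach time, while the HTC/HAL
binding happens per init. A minimal sketch, assuming the dp_soc/htt_soc fields
used elsewhere in this commit; the helper name is hypothetical.

/* Hypothetical helper showing the attach-time vs init-time split of HTT
 * setup after this change. Error unwinding is trimmed to the essentials. */
static QDF_STATUS example_htt_bringup(struct dp_soc *soc, HTC_HANDLE htc)
{
        struct htt_soc *htt;

        /* Attach time: allocate htt_soc and pre-allocate HTC packets.
         * Both survive soc up/down cycles. */
        htt = qdf_mem_malloc(sizeof(*htt));
        if (!htt)
                return QDF_STATUS_E_NOMEM;

        htt->dp_soc = soc;
        htt->htc_soc = htc;
        soc->htt_handle = htt;

        if (htt_soc_htc_prealloc(htt) != QDF_STATUS_SUCCESS) {
                qdf_mem_free(htt);
                return QDF_STATUS_E_NOMEM;
        }

        /* Init time (every soc up): bind to HAL/HTC and connect services */
        if (!htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
                                htt->htc_soc, soc->hal_soc, soc->osdev))
                return QDF_STATUS_E_FAILURE;

        return QDF_STATUS_SUCCESS;
}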
/**
* dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW

View File

@@ -137,10 +137,30 @@ struct htt_rx_ring_tlv_filter {
u_int16_t md_ctrl_filter;
};
/*
* htt_soc_initialize() - SOC level HTT initialization
* @htt_soc: Opaque htt SOC handle
* @ctrl_psoc: Opaque ctrl SOC handle
* @htc_soc: SOC level HTC handle
* @hal_soc: Opaque HAL SOC handle
* @osdev: QDF device
*
* Return: HTT handle on success; NULL on failure
*/
void *
htt_soc_attach(void *txrx_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
htt_soc_initialize(void *htt_soc, void *ctrl_psoc, HTC_HANDLE htc_soc,
void *hal_soc, qdf_device_t osdev);
/*
* htt_soc_htc_prealloc() - HTC memory prealloc
* @htt_soc: SOC level HTT handle
*
* Return: QDF_STATUS_SUCCESS on success or
* QDF_STATUS_E_NOMEM on allocation failure
*/
QDF_STATUS
htt_soc_htc_prealloc(struct htt_soc *htt_soc);
void htt_soc_detach(void *soc);
int htt_srng_setup(void *htt_soc, int pdev_id, void *hal_srng,

View File

@@ -62,6 +62,11 @@ extern int con_mode_monitor;
#include <pktlog_ac.h>
#endif
#endif
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
uint8_t *peer_mac_addr,
@@ -1037,9 +1042,14 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
srng->hal_srng = NULL;
srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
srng->num_entries = num_entries;
srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
soc->osdev, soc->osdev->dev, srng->alloc_size,
&(srng->base_paddr_unaligned));
if (!soc->dp_soc_reinit) {
srng->base_vaddr_unaligned =
qdf_mem_alloc_consistent(soc->osdev,
soc->osdev->dev,
srng->alloc_size,
&srng->base_paddr_unaligned);
}
if (!srng->base_vaddr_unaligned) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1124,6 +1134,20 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
return 0;
}
/*
* dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
* @soc: DP SOC handle
* @srng: source ring structure
* @ring_type: type of ring
* @ring_num: ring number
*
* Return: None
*/
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
int ring_type, int ring_num)
{
}
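
The skip-on-reinit guard added to dp_srng_setup() above (and repeated for the
link descriptor banks and scatter buffers later in this commit) always has the
same shape; a minimal sketch of that pattern, with a hypothetical helper name:

/* Hypothetical condensation of the allocation guard used throughout this
 * commit: allocate consistent memory only on the first init; on re-init the
 * previously stored vaddr/paddr (kept in the reused section of the soc/pdev
 * structure) are used as-is. */
static void *example_alloc_or_reuse(struct dp_soc *soc, void **vaddr,
                                    qdf_dma_addr_t *paddr, uint32_t size)
{
        if (!soc->dp_soc_reinit)
                *vaddr = qdf_mem_alloc_consistent(soc->osdev,
                                                  soc->osdev->dev,
                                                  size, paddr);

        return *vaddr;
}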
/**
* dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
* Any buffers allocated and attached to ring entries are expected to be freed
@@ -1677,6 +1701,7 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
uint32_t entry_size, num_entries;
int i;
uint32_t desc_id = 0;
qdf_dma_addr_t *baseaddr = NULL;
/* Only Tx queue descriptors are allocated from common link descriptor
* pool Rx queue descriptors are not included in this because (REO queue
@@ -1726,10 +1751,15 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
total_mem_size, num_link_desc_banks);
for (i = 0; i < num_link_desc_banks; i++) {
if (!soc->dp_soc_reinit) {
baseaddr = &soc->link_desc_banks[i].
base_paddr_unaligned;
soc->link_desc_banks[i].base_vaddr_unaligned =
qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
qdf_mem_alloc_consistent(soc->osdev,
soc->osdev->dev,
max_alloc_size,
&(soc->link_desc_banks[i].base_paddr_unaligned));
baseaddr);
}
soc->link_desc_banks[i].size = max_alloc_size;
soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
@@ -1755,10 +1785,15 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
/* Allocate last bank in case total memory required is not exact
* multiple of max_alloc_size
*/
if (!soc->dp_soc_reinit) {
baseaddr = &soc->link_desc_banks[i].
base_paddr_unaligned;
soc->link_desc_banks[i].base_vaddr_unaligned =
qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
qdf_mem_alloc_consistent(soc->osdev,
soc->osdev->dev,
last_bank_size,
&(soc->link_desc_banks[i].base_paddr_unaligned));
baseaddr);
}
soc->link_desc_banks[i].size = last_bank_size;
soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
@@ -1822,6 +1857,7 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
uint32_t rem_entries;
uint8_t *scatter_buf_ptr;
uint16_t scatter_buf_num;
uint32_t buf_size = 0;
soc->wbm_idle_scatter_buf_size =
hal_idle_list_scatter_buf_size(soc->hal_soc);
@@ -1838,15 +1874,20 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
}
for (i = 0; i < num_scatter_bufs; i++) {
baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
if (!soc->dp_soc_reinit) {
buf_size = soc->wbm_idle_scatter_buf_size;
soc->wbm_idle_scatter_buf_base_vaddr[i] =
qdf_mem_alloc_consistent(soc->osdev,
soc->osdev->dev,
soc->wbm_idle_scatter_buf_size,
&(soc->wbm_idle_scatter_buf_base_paddr[i]));
soc->osdev->
dev,
buf_size,
baseaddr);
}
if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_ERROR,
FL("Scatter list memory alloc failed"));
FL("Scatter lst memory alloc fail"));
goto fail;
}
}
@@ -2447,7 +2488,6 @@ static int dp_soc_cmn_setup(struct dp_soc *soc)
goto fail1;
}
soc->num_tcl_data_rings = 0;
/* Tx data rings */
if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
@@ -2971,7 +3011,12 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
int nss_cfg;
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
struct dp_pdev *pdev = NULL;
if (soc->dp_soc_reinit)
pdev = soc->pdev_list[pdev_id];
else
pdev = qdf_mem_malloc(sizeof(*pdev));
if (!pdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -2979,6 +3024,11 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
goto fail0;
}
/*
* Variable to prevent double pdev deinitialization during
* radio detach execution, i.e. in the absence of any vdev.
*/
pdev->pdev_deinit = 0;
pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
if (!pdev->invalid_peer) {
@@ -3010,8 +3060,9 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
pdev->soc = soc;
pdev->ctrl_pdev = ctrl_pdev;
pdev->pdev_id = pdev_id;
pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
soc->pdev_list[pdev_id] = pdev;
pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
soc->pdev_count++;
TAILQ_INIT(&pdev->vdev_list);
@@ -3170,7 +3221,7 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
return (struct cdp_pdev *)pdev;
fail1:
dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
dp_pdev_detach((struct cdp_pdev *)pdev, 0);
fail0:
return NULL;
@@ -3249,17 +3300,9 @@ static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
}
#if !defined(DISABLE_MON_CONFIG)
/**
* dp_mon_ring_deinit() - Cleanup Monitor rings
*
* @soc: soc handle
* @pdev: datapath physical dev handle
* @mac_id: mac number
*
* Return: None
*/
static
void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
int mac_id)
{
if (soc->wlan_cfg_ctx->rxdma1_enable) {
@@ -3294,33 +3337,78 @@ void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
}
#else
static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
int mac_id)
{
}
#endif
/*
* dp_pdev_detach_wifi3() - detach txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force detach
/**
* dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
*
* @soc: soc handle
* @pdev: datapath physical dev handle
* @mac_id: mac number
*
* Return: None
*/
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
int mac_id)
{
}
/**
* dp_pdev_mem_reset() - Reset txrx pdev memory
* @pdev: dp pdev handle
*
* Return: None
*/
static void dp_pdev_mem_reset(struct dp_pdev *pdev)
{
uint16_t len = 0;
uint8_t *dp_pdev_offset = (uint8_t *)pdev;
len = sizeof(struct dp_pdev) -
offsetof(struct dp_pdev, pdev_deinit) -
sizeof(pdev->pdev_deinit);
dp_pdev_offset = dp_pdev_offset +
offsetof(struct dp_pdev, pdev_deinit) +
sizeof(pdev->pdev_deinit);
qdf_mem_zero(dp_pdev_offset, len);
}
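
dp_pdev_mem_reset() above (and dp_soc_mem_reset() later in this file) rely on
the structure layout: members placed before the marker flag are reused, while
everything after it is wiped on deinit. A self-contained illustration of the
same offsetof() pattern with a hypothetical structure, using plain libc calls
instead of the qdf wrappers:

#include <stddef.h>
#include <string.h>

/* Hypothetical context following the same layout rule as dp_pdev/dp_soc:
 * reusable members first, then a marker flag, then per-init members. */
struct example_ctx {
        void *reused_pool;       /* allocated once, reused across up/down */
        int reinit;              /* marker - do not move or rename */
        int per_init_counter;    /* re-derived on every init */
        void *per_init_ptr;
};

/* Zero only the per-init section, i.e. everything after the marker. */
static void example_ctx_mem_reset(struct example_ctx *ctx)
{
        size_t start = offsetof(struct example_ctx, reinit) +
                       sizeof(ctx->reinit);

        memset((char *)ctx + start, 0, sizeof(*ctx) - start);
}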
/**
* dp_pdev_deinit() - Deinit txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force deinit
*
* Return: None
*/
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
struct dp_soc *soc = pdev->soc;
qdf_nbuf_t curr_nbuf, next_nbuf;
int mac_id;
/*
* Prevent double pdev deinitialization during radio detach
* execution, i.e. in the absence of any vdev
*/
if (pdev->pdev_deinit)
return;
pdev->pdev_deinit = 1;
dp_wdi_event_detach(pdev);
dp_tx_pdev_detach(pdev);
if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
TCL_DATA, pdev->pdev_id);
dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
WBM2SW_RELEASE, pdev->pdev_id);
}
@@ -3338,17 +3426,17 @@ static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
/* Cleanup per PDEV REO rings if configured */
if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
REO_DST, pdev->pdev_id);
}
dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
dp_rxdma_ring_cleanup(soc, pdev);
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
dp_mon_ring_deinit(soc, pdev, mac_id);
dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
RXDMA_DST, 0);
}
@@ -3358,20 +3446,94 @@ static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
qdf_nbuf_free(curr_nbuf);
curr_nbuf = next_nbuf;
}
pdev->invalid_peer_head_msdu = NULL;
pdev->invalid_peer_tail_msdu = NULL;
dp_htt_ppdu_stats_detach(pdev);
qdf_nbuf_free(pdev->sojourn_buf);
dp_cal_client_detach(&pdev->cal_client_ctx);
soc->pdev_list[pdev->pdev_id] = NULL;
soc->pdev_count--;
wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
qdf_mem_free(pdev->invalid_peer);
qdf_mem_free(pdev->dp_txrx_handle);
dp_pdev_mem_reset(pdev);
}
/**
* dp_pdev_deinit_wifi3() - Deinit txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force deinit
*
* Return: None
*/
static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
dp_pdev_deinit(txrx_pdev, force);
}
/*
* dp_pdev_detach() - Complete rest of pdev detach
* @txrx_pdev: Datapath PDEV handle
* @force: Force deinit
*
* Return: None
*/
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
struct dp_soc *soc = pdev->soc;
int mac_id;
if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
TCL_DATA, pdev->pdev_id);
dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
WBM2SW_RELEASE, pdev->pdev_id);
}
dp_mon_link_free(pdev);
/* Cleanup per PDEV REO rings if configured */
if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
REO_DST, pdev->pdev_id);
}
dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
dp_mon_ring_cleanup(soc, pdev, mac_id);
dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
RXDMA_DST, 0);
}
soc->pdev_list[pdev->pdev_id] = NULL;
qdf_mem_free(pdev);
}
/*
* dp_pdev_detach_wifi3() - detach txrx pdev
* @txrx_pdev: Datapath PDEV handle
* @force: Force detach
*
* Return: None
*/
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
struct dp_soc *soc = pdev->soc;
if (soc->dp_soc_reinit) {
dp_pdev_detach(txrx_pdev, force);
} else {
dp_pdev_deinit(txrx_pdev, force);
dp_pdev_detach(txrx_pdev, force);
}
}
/*
* dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
* @soc: DP SOC handle
@@ -3397,17 +3559,46 @@ static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
/*
* dp_soc_detach_wifi3() - Detach txrx SOC
* @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
/**
* dp_soc_mem_reset() - Reset Dp Soc memory
* @soc: DP handle
*
* Return: None
*/
static void dp_soc_detach_wifi3(void *txrx_soc)
static void dp_soc_mem_reset(struct dp_soc *soc)
{
uint16_t len = 0;
uint8_t *dp_soc_offset = (uint8_t *)soc;
len = sizeof(struct dp_soc) -
offsetof(struct dp_soc, dp_soc_reinit) -
sizeof(soc->dp_soc_reinit);
dp_soc_offset = dp_soc_offset +
offsetof(struct dp_soc, dp_soc_reinit) +
sizeof(soc->dp_soc_reinit);
qdf_mem_zero(dp_soc_offset, len);
}
/**
* dp_soc_deinit() - Deinitialize txrx SOC
* @txrx_soc: Opaque DP SOC handle
*
* Return: None
*/
static void dp_soc_deinit(void *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
int i;
qdf_atomic_set(&soc->cmn_init_done, 0);
for (i = 0; i < MAX_PDEV_CNT; i++) {
if (soc->pdev_list[i])
dp_pdev_deinit((struct cdp_pdev *)
soc->pdev_list[i], 1);
}
qdf_flush_work(&soc->htt_stats.work);
qdf_disable_work(&soc->htt_stats.work);
@@ -3416,23 +3607,114 @@ static void dp_soc_detach_wifi3(void *txrx_soc)
dp_reo_cmdlist_destroy(soc);
for (i = 0; i < MAX_PDEV_CNT; i++) {
if (soc->pdev_list[i])
dp_pdev_detach_wifi3(
(struct cdp_pdev *)soc->pdev_list[i], 1);
dp_peer_find_detach(soc);
/* Free the ring memories */
/* Common rings */
dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
/* Tx data rings */
if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
for (i = 0; i < soc->num_tcl_data_rings; i++) {
dp_srng_deinit(soc, &soc->tcl_data_ring[i],
TCL_DATA, i);
dp_srng_deinit(soc, &soc->tx_comp_ring[i],
WBM2SW_RELEASE, i);
}
}
dp_peer_find_detach(soc);
/* TCL command and status rings */
dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
/* Rx data rings */
if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
soc->num_reo_dest_rings =
wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
for (i = 0; i < soc->num_reo_dest_rings; i++) {
/* TODO: Get number of rings and ring sizes
* from wlan_cfg
*/
dp_srng_deinit(soc, &soc->reo_dest_ring[i],
REO_DST, i);
}
}
/* REO reinjection ring */
dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
/* Rx release ring */
dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
/* Rx exception ring */
/* TODO: Better to store ring_type and ring_num in
* dp_srng during setup
*/
dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
/* REO command and status rings */
dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
qdf_spinlock_destroy(&soc->peer_ref_mutex);
qdf_spinlock_destroy(&soc->htt_stats.lock);
htt_soc_detach(soc->htt_handle);
qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
dp_reo_cmdlist_destroy(soc);
qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
dp_reo_desc_freelist_destroy(soc);
dp_soc_wds_detach(soc);
qdf_spinlock_destroy(&soc->ast_lock);
dp_soc_mem_reset(soc);
}
/**
* dp_soc_deinit_wifi3() - Deinitialize txrx SOC
* @txrx_soc: Opaque DP SOC handle
*
* Return: None
*/
static void dp_soc_deinit_wifi3(void *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
soc->dp_soc_reinit = 1;
dp_soc_deinit(txrx_soc);
}
/*
* dp_soc_detach() - Detach rest of txrx SOC
* @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
*
* Return: None
*/
static void dp_soc_detach(void *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
int i;
qdf_atomic_set(&soc->cmn_init_done, 0);
/* TBD: Call Tx and Rx cleanup functions to free buffers and
* SW descriptors
*/
for (i = 0; i < MAX_PDEV_CNT; i++) {
if (soc->pdev_list[i])
dp_pdev_detach((struct cdp_pdev *)
soc->pdev_list[i], 1);
}
/* Free the ring memories */
/* Common rings */
dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
dp_tx_soc_detach(soc);
/* Tx data rings */
if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
for (i = 0; i < soc->num_tcl_data_rings; i++) {
@@ -3476,24 +3758,33 @@ static void dp_soc_detach_wifi3(void *txrx_soc)
dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
dp_hw_link_desc_pool_cleanup(soc);
qdf_spinlock_destroy(&soc->peer_ref_mutex);
qdf_spinlock_destroy(&soc->htt_stats.lock);
soc->dp_soc_reinit = 0;
htt_soc_detach(soc->htt_handle);
qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
dp_reo_desc_freelist_destroy(soc);
wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
dp_soc_wds_detach(soc);
qdf_spinlock_destroy(&soc->ast_lock);
qdf_mem_free(soc);
}
/*
* dp_soc_detach_wifi3() - Detach txrx SOC
* @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
*
* Return: None
*/
static void dp_soc_detach_wifi3(void *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
if (soc->dp_soc_reinit) {
dp_soc_detach(txrx_soc);
} else {
dp_soc_deinit(txrx_soc);
dp_soc_detach(txrx_soc);
}
}
#if !defined(DISABLE_MON_CONFIG)
/**
* dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
@@ -8473,6 +8764,7 @@ static struct cdp_cmn_ops dp_ops_cmn = {
.txrx_vdev_detach = dp_vdev_detach_wifi3,
.txrx_pdev_attach = dp_pdev_attach_wifi3,
.txrx_pdev_detach = dp_pdev_detach_wifi3,
.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
.txrx_peer_create = dp_peer_create_wifi3,
.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
@@ -8502,6 +8794,10 @@ static struct cdp_cmn_ops dp_ops_cmn = {
.txrx_peer_delete = dp_peer_delete_wifi3,
.txrx_vdev_register = dp_vdev_register_wifi3,
.txrx_soc_detach = dp_soc_detach_wifi3,
.txrx_soc_deinit = dp_soc_deinit_wifi3,
.txrx_soc_init = dp_soc_init_wifi3,
.txrx_tso_soc_attach = dp_tso_soc_attach,
.txrx_tso_soc_detach = dp_tso_soc_detach,
.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
@@ -8883,6 +9179,9 @@ static void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
}
#ifdef QCA_WIFI_QCA8074
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
* dp_soc_attach_wifi3() - Attach txrx SOC
* @ctrl_psoc: Opaque SOC handle from control plane
@@ -8898,13 +9197,66 @@ void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id)
{
struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
int target_type;
struct dp_soc *dp_soc = NULL;
dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
ol_ops, device_id);
if (!dp_soc)
return NULL;
if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
return NULL;
return (void *)dp_soc;
}
#else
/**
* dp_soc_attach_wifi3() - Attach txrx SOC
* @ctrl_psoc: Opaque SOC handle from control plane
* @htc_handle: Opaque HTC handle
* @hif_handle: Opaque HIF handle
* @qdf_osdev: QDF device
* @ol_ops: Offload Operations
* @device_id: Device ID
*
* Return: DP SOC handle on success, NULL on failure
*/
void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id)
{
struct dp_soc *dp_soc = NULL;
dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
ol_ops, device_id);
return (void *)dp_soc;
}
#endif
/**
* dp_soc_attach() - Attach txrx SOC
* @ctrl_psoc: Opaque SOC handle from control plane
* @htc_handle: Opaque HTC handle
* @qdf_osdev: QDF device
* @ol_ops: Offload Operations
* @device_id: Device ID
*
* Return: DP SOC handle on success, NULL on failure
*/
static struct dp_soc *
dp_soc_attach(void *ctrl_psoc, HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id)
{
int int_ctx;
struct dp_soc *soc = NULL;
struct htt_soc *htt_soc = NULL;
soc = qdf_mem_malloc(sizeof(*soc));
if (!soc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("DP SOC memory allocation failed"));
dp_err("DP SOC memory allocation failed");
goto fail0;
}
@@ -8914,25 +9266,57 @@ void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
soc->cdp_soc.ol_ops = ol_ops;
soc->ctrl_psoc = ctrl_psoc;
soc->osdev = qdf_osdev;
soc->hif_handle = hif_handle;
soc->hal_soc = hif_get_hal_handle(hif_handle);
soc->htt_handle = htt_soc_attach(soc, ctrl_psoc, htc_handle,
soc->hal_soc, qdf_osdev);
soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
if (!soc->htt_handle) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("HTT attach failed"));
goto fail1;
}
soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
if (!soc->wlan_cfg_ctx) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("wlan_cfg_soc_attach failed"));
goto fail2;
dp_err("wlan_cfg_ctx failed\n");
goto fail1;
}
htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
if (!htt_soc) {
dp_err("HTT attach failed");
goto fail1;
}
soc->htt_handle = htt_soc;
htt_soc->dp_soc = soc;
htt_soc->htc_soc = htc_handle;
if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
goto fail2;
return (void *)soc;
fail2:
qdf_mem_free(htt_soc);
fail1:
qdf_mem_free(soc);
fail0:
return NULL;
}
/**
* dp_soc_init() - Initialize txrx SOC
* @dp_soc: Opaque DP SOC handle
* @htc_handle: Opaque HTC handle
* @hif_handle: Opaque HIF handle
*
* Return: DP SOC handle on success, NULL on failure
*/
void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle, void *hif_handle)
{
int target_type;
struct dp_soc *soc = (struct dp_soc *)dpsoc;
struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
htt_soc->htc_soc = htc_handle;
soc->hif_handle = hif_handle;
soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
if (!soc->hal_soc)
return NULL;
htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc, htt_soc->htc_soc,
soc->hal_soc, soc->osdev);
target_type = hal_get_target_type(soc->hal_soc);
switch (target_type) {
case TARGET_TYPE_QCA6290:
@@ -8978,7 +9362,7 @@ void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
}
wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
cfg_get(ctrl_psoc, CFG_DP_RX_HASH));
cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
soc->cce_disable = false;
if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
@@ -9007,15 +9391,29 @@ void *dp_soc_attach_wifi3(void *ctrl_psoc, void *hif_handle,
/* initialize work queue for stats processing */
qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
return (void *)soc;
return soc;
fail2:
htt_soc_detach(soc->htt_handle);
fail1:
qdf_mem_free(soc);
fail0:
return NULL;
}
/**
* dp_soc_init_wifi3() - Initialize txrx SOC
* @dp_soc: Opaque DP SOC handle
* @ctrl_psoc: Opaque SOC handle from control plane(Unused)
* @hif_handle: Opaque HIF handle
* @htc_handle: Opaque HTC handle
* @qdf_osdev: QDF device (Unused)
* @ol_ops: Offload Operations (Unused)
* @device_id: Device ID (Unused)
*
* Return: DP SOC handle on success, NULL on failure
*/
void *dp_soc_init_wifi3(void *dpsoc, void *ctrl_psoc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id)
{
return dp_soc_init(dpsoc, htc_handle, hif_handle);
}
#endif
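
The two dp_soc_attach_wifi3() variants above differ only in whether
dp_soc_init() runs immediately; a condensed, hypothetical view of that
compile-time split (dp_soc_attach() and dp_soc_init() are the helpers defined
in this file):

/* Hypothetical condensation of the two variants above. With
 * QCA_MEM_ATTACH_ON_WIFI3 defined, attach only allocates memory and the host
 * later calls dp_soc_init_wifi3() (e.g. through cdp_soc_init()); without it,
 * attach also initialises, preserving the old one-step flow. */
static void *example_soc_attach(void *ctrl_psoc, void *hif_handle,
                                HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
                                struct ol_if_ops *ol_ops, uint16_t device_id)
{
        struct dp_soc *dp_soc = dp_soc_attach(ctrl_psoc, htc_handle,
                                              qdf_osdev, ol_ops, device_id);

        if (!dp_soc)
                return NULL;

#ifndef QCA_MEM_ATTACH_ON_WIFI3
        if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
                return NULL;
#endif

        return (void *)dp_soc;
}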
/*
@@ -9066,12 +9464,13 @@ void dp_is_hw_dbs_enable(struct dp_soc *soc,
int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
bool enable)
{
struct dp_soc *soc = pdev->soc;
struct dp_soc *soc = NULL;
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
int max_mac_rings = wlan_cfg_get_num_mac_rings
(pdev->wlan_cfg_ctx);
uint8_t mac_id = 0;
soc = pdev->soc;
dp_is_hw_dbs_enable(soc, &max_mac_rings);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,

View File

@@ -43,6 +43,18 @@ QDF_STATUS dp_rx_pdev_mon_detach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int mac_id);
QDF_STATUS dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id);
/**
* dp_mon_link_free() - free monitor link desc pool
* @pdev: core txrx pdev context
*
* This function will release DP link desc pool for monitor mode from
* main device context.
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS dp_mon_link_free(struct dp_pdev *pdev);
uint32_t dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota);
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,

View File

@@ -1148,6 +1148,7 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
uint32_t num_replenish_buf;
struct dp_srng *dp_srng;
int i;
qdf_dma_addr_t *baseaddr = NULL;
dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev];
@@ -1178,25 +1179,32 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
}
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
"%s: total_mem_size: %d, num_link_desc_banks: %u, \
max_alloc_size: %d last_bank_size: %d",
__func__, total_mem_size, num_link_desc_banks, max_alloc_size,
last_bank_size);
"%s: total_mem_size: %d, num_link_desc_banks: %u",
__func__, total_mem_size, num_link_desc_banks);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
"%s: max_alloc_size: %d last_bank_size: %d",
__func__, max_alloc_size, last_bank_size);
for (i = 0; i < num_link_desc_banks; i++) {
dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned =
qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
baseaddr = &dp_pdev->link_desc_banks[mac_for_pdev][i].
base_paddr_unaligned;
if (!soc->dp_soc_reinit) {
dp_pdev->link_desc_banks[mac_for_pdev][i].
base_vaddr_unaligned =
qdf_mem_alloc_consistent(soc->osdev,
soc->osdev->dev,
max_alloc_size,
&(dp_pdev->link_desc_banks[mac_for_pdev][i].
base_paddr_unaligned));
baseaddr);
if (!dp_pdev->link_desc_banks[mac_for_pdev][i].
base_vaddr_unaligned) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Link desc memory allocation failed",
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_ERROR,
"%s: Link desc mem alloc failed",
__func__);
goto fail;
}
}
dp_pdev->link_desc_banks[mac_for_pdev][i].size = max_alloc_size;
dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr =
@@ -1223,19 +1231,25 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
/* Allocate last bank in case total memory required is not exact
* multiple of max_alloc_size
*/
dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned =
baseaddr = &dp_pdev->link_desc_banks[mac_for_pdev][i].
base_paddr_unaligned;
if (!soc->dp_soc_reinit) {
dp_pdev->link_desc_banks[mac_for_pdev][i].
base_vaddr_unaligned =
qdf_mem_alloc_consistent(soc->osdev,
soc->osdev->dev, last_bank_size,
&(dp_pdev->link_desc_banks[mac_for_pdev][i].
base_paddr_unaligned));
soc->osdev->dev,
last_bank_size,
baseaddr);
if (dp_pdev->link_desc_banks[mac_for_pdev][i].
base_vaddr_unaligned == NULL) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: allocation failed for mon link desc pool",
if (!dp_pdev->link_desc_banks[mac_for_pdev][i].
base_vaddr_unaligned) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_ERROR,
"%s: alloc fail:mon link desc pool",
__func__);
goto fail;
}
}
dp_pdev->link_desc_banks[mac_for_pdev][i].size = last_bank_size;
dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr =
@@ -1436,6 +1450,20 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_mon_link_free(struct dp_pdev *pdev) {
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
int mac_id;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev);
}
return QDF_STATUS_SUCCESS;
}
/**
* dp_rx_pdev_mon_detach() - detach dp rx for monitor mode
* @pdev: core txrx pdev context
@@ -1450,14 +1478,12 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
QDF_STATUS
dp_rx_pdev_mon_detach(struct dp_pdev *pdev) {
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
int mac_id;
qdf_spinlock_destroy(&pdev->mon_lock);
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev);
dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev);
dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev);
}
@@ -1474,4 +1500,9 @@ QDF_STATUS
dp_rx_pdev_mon_detach(struct dp_pdev *pdev) {
return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_mon_link_free(struct dp_pdev *pdev) {
return QDF_STATUS_SUCCESS;
}
#endif /* DISABLE_MON_CONFIG */

View File

@@ -3484,6 +3484,117 @@ static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
* dp_tso_attach_wifi3() - TSO attach handler
* @txrx_soc: Opaque Dp handle
*
* Reserve TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
return dp_tso_soc_attach(txrx_soc);
}
/**
* dp_tso_detach_wifi3() - TSO Detach handler
* @txrx_soc: Opaque Dp handle
*
* Deallocate TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
{
return dp_tso_soc_detach(txrx_soc);
}
#else
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
return QDF_STATUS_SUCCESS;
}
static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
{
return QDF_STATUS_SUCCESS;
}
#endif
QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
uint8_t i;
uint8_t num_pool;
uint32_t num_desc;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
for (i = 0; i < num_pool; i++)
dp_tx_tso_desc_pool_free(soc, i);
dp_info("%s TSO Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++)
dp_tx_tso_num_seg_pool_free(soc, i);
dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);
return QDF_STATUS_SUCCESS;
}
/**
* dp_tso_attach() - TSO attach handler
* @txrx_soc: Opaque Dp handle
*
* Reserve TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
uint8_t i;
uint8_t num_pool;
uint32_t num_desc;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
dp_err("TSO Desc Pool alloc %d failed %pK",
i, soc);
return QDF_STATUS_E_FAILURE;
}
}
dp_info("%s TSO Desc Alloc %d, descs = %d",
__func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
dp_err("TSO Num of seg Pool alloc %d failed %pK",
i, soc);
return QDF_STATUS_E_FAILURE;
}
}
return QDF_STATUS_SUCCESS;
}
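
dp_tso_soc_attach()/dp_tso_soc_detach() are exported through the
txrx_tso_soc_attach/detach ops and the cdp_tso_soc_attach()/cdp_tso_soc_detach()
wrappers added earlier in this commit; a minimal usage sketch, with a
hypothetical runtime-toggle handler:

/* Hypothetical handler for a runtime TSO toggle. Only the
 * cdp_tso_soc_attach()/cdp_tso_soc_detach() calls come from this commit. */
static QDF_STATUS example_set_tso(ol_txrx_soc_handle soc, bool enable)
{
        if (enable)
                /* reserve the TSO segment and num-seg descriptor pools */
                return cdp_tso_soc_attach(soc);

        /* release the TSO descriptor pools */
        return cdp_tso_soc_detach(soc);
}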
/**
* dp_tx_soc_detach() - detach soc from dp tx
* @soc: core txrx main context
@@ -3500,6 +3611,7 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
uint16_t num_desc;
uint16_t num_ext_desc;
uint8_t i;
QDF_STATUS status = QDF_STATUS_SUCCESS;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
@@ -3525,22 +3637,9 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
"%s MSDU Ext Desc Pool %d Free descs = %d",
__func__, num_pool, num_ext_desc);
for (i = 0; i < num_pool; i++) {
dp_tx_tso_desc_pool_free(soc, i);
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++)
dp_tx_tso_num_seg_pool_free(soc, i);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Num of seg Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);
status = dp_tso_detach_wifi3(soc);
if (status != QDF_STATUS_SUCCESS)
return status;
return QDF_STATUS_SUCCESS;
}
@@ -3561,6 +3660,7 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
uint8_t num_pool;
uint32_t num_desc;
uint32_t num_ext_desc;
QDF_STATUS status = QDF_STATUS_SUCCESS;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
@@ -3590,33 +3690,10 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
"%s MSDU Ext Desc Alloc %d, descs = %d",
__func__, num_pool, num_ext_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"TSO Desc Pool alloc %d failed %pK",
i, soc);
status = dp_tso_attach_wifi3((void *)soc);
if (status != QDF_STATUS_SUCCESS)
goto fail;
}
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Desc Alloc %d, descs = %d",
__func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"TSO Num of seg Pool alloc %d failed %pK",
i, soc);
goto fail;
}
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Num of seg pool Alloc %d, descs = %d",
__func__, num_pool, num_desc);
/* Initialize descriptors in TCL Rings */
if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {

View File

@@ -142,6 +142,28 @@ void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc);
/**
* dp_tso_attach() - TSO Attach handler
* @txrx_soc: Opaque Dp handle
*
* Reserve TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
QDF_STATUS dp_tso_soc_attach(void *txrx_soc);
/**
* dp_tso_detach() - TSO Detach handler
* @txrx_soc: Opaque Dp handle
*
* Deallocate TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
QDF_STATUS dp_tso_soc_detach(void *txrx_soc);
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);

View File

@@ -101,8 +101,10 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
tx_desc_pool->elem_size = desc_size;
if (!soc->dp_soc_reinit)
qdf_mem_multi_pages_alloc(soc->osdev,
&tx_desc_pool->desc_pages, desc_size, num_elem,
&tx_desc_pool->desc_pages,
desc_size, num_elem,
0, true);
if (!tx_desc_pool->desc_pages.num_pages) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -117,7 +119,8 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
tx_desc_pool->freelist = (struct dp_tx_desc_s *)
*tx_desc_pool->desc_pages.cacheable_pages;
if (qdf_mem_multi_page_link(soc->osdev,
&tx_desc_pool->desc_pages, desc_size, num_elem, true)) {
&tx_desc_pool->desc_pages,
desc_size, num_elem, true)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"invalid tx desc allocation - overflow num link");
goto free_tx_desc;
@@ -187,16 +190,20 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
struct qdf_mem_dma_page_t *page_info;
struct qdf_mem_multi_page_t *pages;
QDF_STATUS status;
qdf_dma_context_t memctx = 0;
/* Coherent tx extension descriptor alloc */
soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
soc->tx_ext_desc[pool_id].elem_count = num_elem;
memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx);
if (!soc->dp_soc_reinit) {
qdf_mem_multi_pages_alloc(soc->osdev,
&soc->tx_ext_desc[pool_id].desc_pages,
&soc->tx_ext_desc[pool_id].
desc_pages,
soc->tx_ext_desc[pool_id].elem_size,
soc->tx_ext_desc[pool_id].elem_count,
qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
false);
memctx, false);
}
if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"ext desc page alloc fail");
@@ -213,11 +220,16 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
*/
soc->tx_ext_desc[pool_id].link_elem_size =
sizeof(struct dp_tx_ext_desc_elem_s);
if (!soc->dp_soc_reinit) {
qdf_mem_multi_pages_alloc(soc->osdev,
&soc->tx_ext_desc[pool_id].desc_link_pages,
soc->tx_ext_desc[pool_id].link_elem_size,
soc->tx_ext_desc[pool_id].elem_count, 0,
true);
&soc->tx_ext_desc[pool_id].
desc_link_pages,
soc->tx_ext_desc[pool_id].
link_elem_size,
soc->tx_ext_desc[pool_id].
elem_count,
0, true);
}
if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"ext link desc page alloc fail");
@@ -329,6 +341,7 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
tso_desc_pool = &soc->tx_tso_desc[pool_id];
tso_desc_pool->num_free = 0;
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
if (!soc->dp_soc_reinit)
qdf_mem_multi_pages_alloc(soc->osdev,
&tso_desc_pool->desc_pages,
desc_size,
@@ -406,6 +419,7 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
tso_num_seg_pool->num_free = 0;
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
if (!soc->dp_soc_reinit)
qdf_mem_multi_pages_alloc(soc->osdev,
&tso_num_seg_pool->desc_pages,
desc_size,

View File

@@ -696,6 +696,10 @@ struct htt_t2h_stats {
/* SOC level structure for data path */
struct dp_soc {
/**
* re-use memory section starts
*/
/* Common base structure - Should be the first member */
struct cdp_soc_t cdp_soc;
@@ -732,6 +736,12 @@ struct dp_soc {
/*number of hw dscp tid map*/
uint8_t num_hw_dscp_tid_map;
/* HAL SOC handle */
void *hal_soc;
/* Device ID coming from Bus sub-system */
uint32_t device_id;
/* Link descriptor memory banks */
struct {
void *base_vaddr_unaligned;
@@ -748,13 +758,7 @@ struct dp_soc {
*/
qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
uint32_t wbm_idle_scatter_buf_size;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
qdf_spinlock_t flow_pool_array_lock;
tx_pause_callback pause_cb;
struct dp_txrx_pool_stats pool_stats;
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/* Tx SW descriptor pool */
struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
@@ -767,30 +771,9 @@ struct dp_soc {
/* Tx TSO Num of segments pool */
struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
/* Tx H/W queues lock */
qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
/* Rx SW descriptor pool for RXDMA buffer */
struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA monitor buffer */
struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA status buffer */
struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
/* HAL SOC handle */
void *hal_soc;
/* DP Interrupts */
struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
/* REO destination rings */
struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
/* Number of REO destination rings */
uint8_t num_reo_dest_rings;
/* REO exception ring - See if should combine this with reo_dest_ring */
struct dp_srng reo_exception_ring;
@@ -806,12 +789,12 @@ struct dp_soc {
/* WBM Rx release ring */
struct dp_srng rx_rel_ring;
/* Number of TCL data rings */
uint8_t num_tcl_data_rings;
/* TCL data ring */
struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
/* Number of TCL data rings */
uint8_t num_tcl_data_rings;
/* TCL command ring */
struct dp_srng tcl_cmd_ring;
@@ -824,6 +807,39 @@ struct dp_soc {
/* Common WBM link descriptor release ring (SW to WBM) */
struct dp_srng wbm_desc_rel_ring;
/* DP Interrupts */
struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
/* Rx SW descriptor pool for RXDMA monitor buffer */
struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA status buffer */
struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA buffer */
struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
/* Number of REO destination rings */
uint8_t num_reo_dest_rings;
/*
* re-use memory section ends
* reuse memory indicator
*
* DO NOT CHANGE NAME OR MOVE THIS VARIABLE
*/
bool dp_soc_reinit;
uint32_t wbm_idle_scatter_buf_size;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
qdf_spinlock_t flow_pool_array_lock;
tx_pause_callback pause_cb;
struct dp_txrx_pool_stats pool_stats;
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/* Tx H/W queues lock */
qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
/* Tx ring map for interrupt processing */
uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
@@ -950,8 +966,6 @@ struct dp_soc {
qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
} ipa_uc_rx_rsc;
#endif
/* Device ID coming from Bus sub-system */
uint32_t device_id;
/* Smart monitor capability for HKv2 */
uint8_t hw_nac_monitor_support;
@@ -1057,6 +1071,9 @@ struct ppdu_info {
/* PDEV level structure for data path */
struct dp_pdev {
/**
* Re-use Memory Section Starts
*/
/* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */
struct cdp_ctrl_objmgr_pdev *ctrl_pdev;
@@ -1072,26 +1089,6 @@ struct dp_pdev {
/* Ring used to replenish rx buffers (maybe to the firmware of MAC) */
struct dp_srng rx_refill_buf_ring;
/* Second ring used to replenish rx buffers */
struct dp_srng rx_refill_buf_ring2;
/* Empty ring used by firmware to post rx buffers to the MAC */
struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
/* wlan_cfg pdev ctxt*/
struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
/* RXDMA monitor buffer replenish ring */
struct dp_srng rxdma_mon_buf_ring[NUM_RXDMA_RINGS_PER_PDEV];
/* RXDMA monitor destination ring */
struct dp_srng rxdma_mon_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];
/* RXDMA monitor status ring. TBD: Check format of this ring */
struct dp_srng rxdma_mon_status_ring[NUM_RXDMA_RINGS_PER_PDEV];
struct dp_srng rxdma_mon_desc_ring[NUM_RXDMA_RINGS_PER_PDEV];
/* RXDMA error destination ring */
struct dp_srng rxdma_err_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];
@@ -1104,6 +1101,33 @@ struct dp_pdev {
uint32_t size;
} link_desc_banks[NUM_RXDMA_RINGS_PER_PDEV][MAX_MON_LINK_DESC_BANKS];
/* RXDMA monitor buffer replenish ring */
struct dp_srng rxdma_mon_buf_ring[NUM_RXDMA_RINGS_PER_PDEV];
/* RXDMA monitor destination ring */
struct dp_srng rxdma_mon_dst_ring[NUM_RXDMA_RINGS_PER_PDEV];
/* RXDMA monitor status ring. TBD: Check format of this ring */
struct dp_srng rxdma_mon_status_ring[NUM_RXDMA_RINGS_PER_PDEV];
struct dp_srng rxdma_mon_desc_ring[NUM_RXDMA_RINGS_PER_PDEV];
/*
* re-use memory section ends
* reuse memory/deinit indicator
*
* DO NOT CHANGE NAME OR MOVE THIS VARIABLE
*/
bool pdev_deinit;
/* Second ring used to replenish rx buffers */
struct dp_srng rx_refill_buf_ring2;
/* Empty ring used by firmware to post rx buffers to the MAC */
struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
/* wlan_cfg pdev ctxt*/
struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
/**
* TODO: See if we need a ring map here for LMAC rings.