qcacmn: Datapath init-deinit changes

Logically split dp_soc_attach and dp_pdev_attach into
allocation and initialization routines, and dp_soc_detach
and dp_pdev_detach into de-initialization and free routines.
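
In effect this turns the old single-step attach/detach into a four-phase
lifecycle: attach allocates memory only, init builds live state over that
memory, and teardown mirrors it as deinit followed by free. A minimal,
self-contained C sketch of the pattern, using a toy pool rather than the
real dp_soc/dp_pdev structures (illustrative only, not driver code):

    #include <stdlib.h>
    #include <string.h>

    struct toy_pool {
            int *descs;     /* backing memory: owned by alloc/free */
            size_t num;
            size_t in_use;  /* live state: owned by init/deinit    */
    };

    /* attach/alloc: memory only, no live state */
    static int toy_pool_alloc(struct toy_pool *p, size_t num)
    {
            p->descs = calloc(num, sizeof(*p->descs));
            if (!p->descs)
                    return -1;
            p->num = num;
            return 0;
    }

    /* init: set up live state over already-allocated memory */
    static void toy_pool_init(struct toy_pool *p)
    {
            memset(p->descs, 0, p->num * sizeof(*p->descs));
            p->in_use = 0;
    }

    /* deinit: undo init only; the memory stays allocated */
    static void toy_pool_deinit(struct toy_pool *p)
    {
            p->in_use = 0;
    }

    /* detach/free: release the memory acquired in alloc */
    static void toy_pool_free(struct toy_pool *p)
    {
            free(p->descs);
            p->descs = NULL;
            p->num = 0;
    }

The payoff is that a deinit/init cycle (for example across a target reset)
can run without touching the allocations, which is what the
dp_is_soc_reinit() special-casing removed elsewhere in this change used to
approximate.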

Change-Id: I23bdca0ca86db42a4d0b2554cd60d99bb207a647
Author: phadiman
Date: 2020-04-30 23:06:48 +05:30
Committed by: Gerrit - the friendly Code Review server
Parent: e4a31a803d
Commit: 1f3652debc
20 changed files with 2847 additions and 2377 deletions


@@ -1210,6 +1210,39 @@ cdp_soc_init(ol_txrx_soc_handle soc, u_int16_t devid,
dp_ol_if_ops, devid);
}
/**
* cdp_pdev_init() - Initialize txrx pdev
* @soc: ol_txrx_soc_handle handle
* @htc_handle: Opaque HTC handle
* @qdf_dev: QDF device
* @pdev_id: pdev id
*
* Return: QDF_STATUS
*/
static inline QDF_STATUS
cdp_pdev_init(ol_txrx_soc_handle soc,
HTC_HANDLE htc_handle, qdf_device_t qdf_dev,
uint8_t pdev_id)
{
if (!soc || !soc->ops) {
QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
"%s: Invalid Instance:", __func__);
QDF_BUG(0);
return 0;
}
if (!soc->ops->cmn_drv_ops ||
!soc->ops->cmn_drv_ops->txrx_pdev_init)
return 0;
return soc->ops->cmn_drv_ops->txrx_pdev_init(soc,
htc_handle, qdf_dev,
pdev_id);
}
/**
* cdp_soc_deinit() - Deinitialize txrx SOC
* @soc: Opaque DP SOC handle
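
For a caller, the new hook slots between pdev allocation and datapath use.
A hedged ordering sketch: only cdp_pdev_init() above is defined in this
hunk; the attach/detach wrappers and their exact signatures are assumptions
for illustration.

    /* Illustrative ordering only; the attach/detach signatures
     * shown here are assumed, not taken from this diff.
     */
    if (!QDF_IS_STATUS_SUCCESS(cdp_pdev_attach(soc, htc_handle,
                                               qdf_dev, pdev_id)))
            return QDF_STATUS_E_NOMEM;            /* alloc failed */

    if (!QDF_IS_STATUS_SUCCESS(cdp_pdev_init(soc, htc_handle,
                                             qdf_dev, pdev_id))) {
            cdp_pdev_detach(soc, pdev_id, 1);     /* undo the alloc */
            return QDF_STATUS_E_FAILURE;
    }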


@@ -353,6 +353,11 @@ struct cdp_cmn_ops {
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops, uint16_t device_id);
QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
HTC_HANDLE htc_handle,
qdf_device_t qdf_osdev,
uint8_t pdev_id);
/**
* txrx_tso_soc_attach() - TSO attach handler triggered during
* dynamic tso activation
@@ -1075,6 +1080,7 @@ struct ol_if_ops {
int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
uint16_t peer_id, uint8_t vdev_id, uint8_t *peer_mac_addr);
#endif
int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
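
Because cdp_pdev_init() dispatches through
soc->ops->cmn_drv_ops->txrx_pdev_init, the DP layer must publish a handler
in its common ops table. A sketch of that registration; dp_pdev_init_wifi3
is an assumed handler name (the real registration lives in dp_main.c,
outside this excerpt):

    static QDF_STATUS dp_pdev_init_wifi3(ol_txrx_soc_handle soc,
                                         HTC_HANDLE htc_handle,
                                         qdf_device_t qdf_osdev,
                                         uint8_t pdev_id);

    static struct cdp_cmn_ops dp_ops_cmn = {
            /* ... existing common ops ... */
            .txrx_pdev_init = dp_pdev_init_wifi3,
    };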


@@ -490,7 +490,7 @@ htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
* htt_htc_pkt_pool_free() - Free HTC packet pool
* @htt_soc: HTT SOC handle
*/
static void
void
htt_htc_pkt_pool_free(struct htt_soc *soc)
{
struct dp_htt_htc_pkt_union *pkt, *next;
@@ -3925,9 +3925,10 @@ struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
}
if (i != MAX_PDEV_CNT) {
for (j = 0; j < i; j++) {
qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
qdf_mem_free(htt_soc->pdevid_tt[i].lmac_ttt);
qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
}
qdf_mem_free(htt_soc);
return NULL;
}
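
The fix above is the standard partial-unwind idiom: on a failure at index
i, free exactly the entries 0..i-1 that did get allocated, indexing with
the inner loop variable j rather than the failed index i. A compact,
self-contained illustration:

    #include <stdlib.h>

    /* Allocate n buffers or none: unwind on the first failure. */
    static int alloc_all(void *bufs[], int n, size_t sz)
    {
            int i, j;

            for (i = 0; i < n; i++) {
                    bufs[i] = malloc(sz);
                    if (!bufs[i]) {
                            /* free only what succeeded: 0..i-1 */
                            for (j = 0; j < i; j++)
                                    free(bufs[j]);
                            return -1;
                    }
            }
            return 0;
    }

Indexing the frees with i, as the old code did, frees the same (failed)
slot repeatedly and leaks everything that was actually allocated.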


@@ -63,6 +63,8 @@ int htt_wbm_event_record(struct htt_logger *h, uint8_t tx_status,
#endif
void htt_htc_pkt_pool_free(struct htt_soc *soc);
#define HTT_TX_MUTEX_TYPE qdf_spinlock_t
#define HTT_TX_MUTEX_INIT(_mutex) \


@@ -1514,8 +1514,6 @@ static inline int dp_get_mac_id_for_mac(struct dp_soc *soc, uint32_t mac_id)
return 0;
}
bool dp_is_soc_reinit(struct dp_soc *soc);
/*
* dp_is_subtype_data() - check if the frame subtype is data
*

(File diff suppressed because it is too large.)


@@ -2488,21 +2488,6 @@ QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
return QDF_STATUS_SUCCESS;
}
/**
* dp_rx_pdev_detach() - detach dp rx
* @pdev: core txrx pdev context
*
* This function will detach DP RX from the main device context
* and will free DP Rx resources.
*
* Return: void
*/
void
dp_rx_pdev_detach(struct dp_pdev *pdev)
{
dp_rx_pdev_desc_pool_free(pdev);
}
static QDF_STATUS
dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc, qdf_nbuf_t *nbuf,
struct dp_pdev *dp_pdev,
@@ -2566,6 +2551,8 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
int page_idx, total_pages;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
int sync_hw_ptr = 1;
uint32_t num_entries_avail;
if (qdf_unlikely(!rxdma_srng)) {
DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
@@ -2574,6 +2561,20 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
rxdma_srng,
sync_hw_ptr);
hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
if (!num_entries_avail) {
dp_err("Num of available entries is zero, nothing to do");
return QDF_STATUS_E_NOMEM;
}
if (num_entries_avail < num_req_buffers)
num_req_buffers = num_entries_avail;
nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
num_req_buffers, &desc_list, &tail);
if (!nr_descs) {
@@ -2697,7 +2698,7 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"nss-wifi<4> skip Rx refil %d", mac_for_pdev);
status = QDF_STATUS_SUCCESS;
return status;
}
dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
@@ -2712,10 +2713,6 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
if (status != QDF_STATUS_SUCCESS)
return status;
rx_desc_pool->owner = DP_WBM2SW_RBM;
rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
return status;
}
@@ -2770,6 +2767,10 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
rx_sw_desc_weight =
wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx);
rx_desc_pool->owner = DP_WBM2SW_RBM;
rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
dp_rx_desc_pool_init(soc, mac_for_pdev,
rx_sw_desc_weight * rxdma_entries,
rx_desc_pool);
@@ -2794,50 +2795,6 @@ void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
dp_rx_desc_pool_deinit(soc, rx_desc_pool);
}
/**
* dp_rx_attach() - attach DP RX
* @pdev: core txrx pdev context
*
* This function will attach a DP RX instance into the main
* device (SOC) context. It will allocate DP Rx resources and
* initialize them.
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS
dp_rx_pdev_attach(struct dp_pdev *pdev)
{
struct dp_soc *soc = pdev->soc;
QDF_STATUS ret_val;
if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"nss-wifi<4> skip Rx refil %d",
pdev->pdev_id);
return QDF_STATUS_SUCCESS;
}
if (!dp_is_soc_reinit(soc)) {
ret_val = dp_rx_pdev_desc_pool_alloc(pdev);
if (ret_val != QDF_STATUS_SUCCESS)
return ret_val;
}
dp_rx_pdev_desc_pool_init(pdev);
ret_val = dp_rx_fst_attach(soc, pdev);
if ((ret_val != QDF_STATUS_SUCCESS) &&
(ret_val != QDF_STATUS_E_NOSUPPORT)) {
QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
"RX Flow Search Table attach failed: pdev %d err %d",
pdev->pdev_id, ret_val);
return ret_val;
}
return dp_rx_pdev_buffers_alloc(pdev);
}
/*
* dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
*
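
With dp_rx_pdev_attach() removed, callers drive the split stages directly.
A sketch of the equivalent sequence using only routines kept in this file;
the actual call site (dp_pdev_attach/dp_pdev_init in dp_main.c) is an
assumption:

    /* attach phase: memory only */
    if (dp_rx_pdev_desc_pool_alloc(pdev) != QDF_STATUS_SUCCESS)
            return QDF_STATUS_E_NOMEM;

    /* init phase: wire up the pool, then post buffers to RXDMA */
    dp_rx_pdev_desc_pool_init(pdev);
    if (dp_rx_pdev_buffers_alloc(pdev) != QDF_STATUS_SUCCESS) {
            dp_rx_pdev_desc_pool_deinit(pdev);
            dp_rx_pdev_desc_pool_free(pdev);
            return QDF_STATUS_E_NOMEM;
    }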


@@ -510,6 +510,7 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size,
struct rx_desc_pool *rx_desc_pool);
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
union dp_rx_desc_list_elem_t **local_desc_list,


@@ -31,7 +31,7 @@ A_COMPILE_TIME_ASSERT(cookie_size_check,
*
* @rx_desc_pool: rx descriptor pool pointer
* Return: QDF_STATUS QDF_STATUS_SUCCESS
*         QDF_STATUS_E_NOMEM
*/
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
@@ -233,7 +233,7 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc,
* @rx_desc_pool: rx descriptor pool pointer
*
* Return: QDF_STATUS QDF_STATUS_SUCCESS
*         QDF_STATUS_E_NOMEM
*/
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
@@ -377,6 +377,7 @@ void dp_rx_desc_pool_deinit(struct dp_soc *soc,
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
/*
* dp_rx_get_free_desc_list() - provide a list of descriptors from
* the free rx desc pool.


@@ -19,6 +19,8 @@
#ifndef _DP_RX_MON_H_
#define _DP_RX_MON_H_
#define MON_BUF_MIN_ENTRIES 8
/*
* dp_rx_mon_status_process() - Process monitor status ring and
* TLV in status ring.
@@ -47,10 +49,28 @@ dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota);
void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id,
uint32_t quota);
QDF_STATUS dp_rx_pdev_mon_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_detach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int mac_id);
QDF_STATUS dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id);
QDF_STATUS dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev);
void dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev);
void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev);
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
QDF_STATUS dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev,
uint32_t mac_id);
QDF_STATUS dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev,
uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev,
uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev,
uint32_t mac_id);
void dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev,
uint32_t mac_id);
void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
bool delayed_replenish);
#ifdef QCA_SUPPORT_FULL_MON


@@ -1092,9 +1092,9 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
qdf_mem_zero(&(pdev->ppdu_info.rx_status),
sizeof(pdev->ppdu_info.rx_status));
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s %d ppdu_id %x != ppdu_info.com_info .ppdu_id %x",
__func__, __LINE__,
ppdu_id, pdev->ppdu_info.com_info.ppdu_id);
"%s %d ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
__func__, __LINE__,
ppdu_id, pdev->ppdu_info.com_info.ppdu_id);
break;
}
@@ -1121,27 +1121,208 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
}
}
#ifndef DISABLE_MON_CONFIG
#if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490) && \
!defined(QCA_WIFI_QCA6750)
/**
* dp_rx_pdev_mon_buf_attach() - Allocate the monitor descriptor pool
*
* @pdev: physical device handle
* @mac_id: mac id
*
* Return: QDF_STATUS
*/
#define MON_BUF_MIN_ALLOC_ENTRIES 128
static QDF_STATUS
dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) {
QDF_STATUS
dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
bool delayed_replenish)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *mon_buf_ring;
uint32_t num_entries;
struct rx_desc_pool *rx_desc_pool;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
num_entries = mon_buf_ring->num_entries;
rx_desc_pool = &soc->rx_desc_mon[mac_id];
dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
/* Replenish the RXDMA monitor buffer ring with only 8 buffers:
* delayed_replenish_entries is 8, but dp_pdev_rx_buffers_attach()
* is passed one less than the requested count, hence 1 is added to
* delayed_replenish_entries to end up with 8 entries. Once the
* monitor VAP is configured, the complete RXDMA monitor buffer
* ring is replenished.
*/
if (delayed_replenish)
num_entries = soc_cfg_ctx->delayed_replenish_entries + 1;
else
num_entries -= soc_cfg_ctx->delayed_replenish_entries;
return dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
rx_desc_pool, num_entries - 1);
}
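
Concretely, with the default delayed_replenish_entries of 8: the delayed
path sets num_entries = 8 + 1 = 9, so the call above posts
num_entries - 1 = 8 buffers as the comment intends; the non-delayed path
instead posts the ring size minus the 8 reserved entries (again less one
at the call).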
static QDF_STATUS
dp_rx_pdev_mon_cmn_buffers_alloc(struct dp_pdev *pdev, int mac_id)
{
struct dp_soc *soc = pdev->soc;
uint8_t pdev_id = pdev->pdev_id;
int mac_for_pdev;
bool delayed_replenish;
QDF_STATUS status = QDF_STATUS_SUCCESS;
uint32_t rx_desc_pool_size, replenish_size;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
delayed_replenish = soc_cfg_ctx->delayed_replenish_entries ? 1 : 0;
mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id);
status = dp_rx_pdev_mon_status_buffers_alloc(pdev, mac_for_pdev);
if (!QDF_IS_STATUS_SUCCESS(status)) {
dp_err("%s: dp_rx_pdev_mon_status_desc_pool_alloc() failed",
__func__);
goto fail;
}
if (!soc->wlan_cfg_ctx->rxdma1_enable)
return status;
status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
delayed_replenish);
if (!QDF_IS_STATUS_SUCCESS(status)) {
dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
__func__);
goto mon_stat_buf_dealloc;
}
return status;
mon_stat_buf_dealloc:
dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
fail:
return status;
}
static void
dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *mon_buf_ring;
uint32_t num_entries;
struct rx_desc_pool *rx_desc_pool;
uint32_t rx_desc_pool_size;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
num_entries = mon_buf_ring->num_entries;
rx_desc_pool = &soc->rx_desc_mon[mac_id];
dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
pdev_id, num_entries);
rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
num_entries;
rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
pdev->mon_last_linkdesc_paddr = 0;
pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
qdf_spinlock_create(&pdev->mon_lock);
}
static void
dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id)
{
struct dp_soc *soc = pdev->soc;
uint32_t mac_for_pdev;
mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev);
if (!soc->wlan_cfg_ctx->rxdma1_enable)
return;
dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
dp_link_desc_ring_replenish(soc, mac_for_pdev);
}
static void
dp_rx_pdev_mon_buf_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_mon[mac_id];
dp_debug("Mon RX Desc buf Pool[%d] deinit", pdev_id);
dp_rx_desc_pool_deinit(soc, rx_desc_pool);
qdf_spinlock_destroy(&pdev->mon_lock);
}
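
Note the pairing: pdev->mon_lock created in
dp_rx_pdev_mon_buf_desc_pool_init() above is destroyed here in deinit, so
the lock's lifetime tracks the init/deinit stage rather than alloc/free.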
static void
dp_rx_pdev_mon_cmn_desc_pool_deinit(struct dp_pdev *pdev, int mac_id)
{
struct dp_soc *soc = pdev->soc;
uint8_t pdev_id = pdev->pdev_id;
int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
dp_rx_pdev_mon_status_desc_pool_deinit(pdev, mac_for_pdev);
if (!soc->wlan_cfg_ctx->rxdma1_enable)
return;
dp_rx_pdev_mon_buf_desc_pool_deinit(pdev, mac_for_pdev);
}
static void
dp_rx_pdev_mon_buf_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_mon[mac_id];
dp_debug("Mon RX Buf Desc Pool Free pdev[%d]", pdev_id);
dp_rx_desc_pool_free(soc, rx_desc_pool);
}
static void
dp_rx_pdev_mon_cmn_desc_pool_free(struct dp_pdev *pdev, int mac_id)
{
struct dp_soc *soc = pdev->soc;
uint8_t pdev_id = pdev->pdev_id;
int mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
dp_hw_link_desc_pool_banks_free(soc, mac_for_pdev);
}
void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_mon[mac_id];
dp_debug("Mon RX Buf buffers Free pdev[%d]", pdev_id);
dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
static QDF_STATUS
dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *mon_buf_ring;
uint32_t num_entries;
struct rx_desc_pool *rx_desc_pool;
uint32_t rx_desc_pool_size;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
mon_buf_ring = &soc->rxdma_mon_buf_ring[mac_id];
@@ -1151,320 +1332,147 @@ dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) {
dp_debug("Mon RX Desc Pool[%d] entries=%u",
pdev_id, num_entries);
rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc->wlan_cfg_ctx) * num_entries;
if (!dp_is_soc_reinit(soc)) {
status = dp_rx_desc_pool_alloc(soc, rx_desc_pool_size,
rx_desc_pool);
if (!QDF_IS_STATUS_SUCCESS(status))
return status;
}
rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
num_entries;
rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
replenish_size = ((num_entries - 1) < MON_BUF_MIN_ALLOC_ENTRIES) ?
(num_entries - 1) : MON_BUF_MIN_ALLOC_ENTRIES;
dp_rx_desc_pool_init(soc, mac_id, rx_desc_pool_size, rx_desc_pool);
status = dp_pdev_rx_buffers_attach(soc, mac_id, mon_buf_ring,
rx_desc_pool, replenish_size);
return status;
return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
}
static QDF_STATUS
dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id)
dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id)
{
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_mon[mac_id];
if (rx_desc_pool->pool_size != 0) {
if (!dp_is_soc_reinit(soc))
dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
rx_desc_pool);
else
dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
return QDF_STATUS_SUCCESS;
}
/**
* dp_mon_link_desc_pool_setup(): Allocate and setup link descriptor pool
* that will be used by HW for various link
* and queue descriptors, managed by WBM
*
* @soc: soc handle
* @mac_id: mac id
*
* Return: QDF_STATUS
*/
static
QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t lmac_id)
{
if (!dp_is_soc_reinit(soc))
if (dp_hw_link_desc_pool_banks_alloc(soc, lmac_id))
return QDF_STATUS_E_FAILURE;
dp_link_desc_ring_replenish(soc, lmac_id);
return QDF_STATUS_SUCCESS;
}
/*
* Free the link descriptor pool that was set up for HW
*/
static
void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id)
{
dp_hw_link_desc_pool_banks_free(soc, mac_id);
}
/**
* dp_mon_buf_delayed_replenish() - Helper routine to replenish monitor dest buf
* @pdev: DP pdev object
*
* Return: None
*/
void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev)
{
struct dp_soc *soc;
uint8_t pdev_id = pdev->pdev_id;
uint32_t mac_for_pdev;
union dp_rx_desc_list_elem_t *tail = NULL;
union dp_rx_desc_list_elem_t *desc_list = NULL;
uint32_t num_entries;
uint32_t id;
soc = pdev->soc;
num_entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev->wlan_cfg_ctx);
for (id = 0; id < NUM_RXDMA_RINGS_PER_PDEV; id++) {
/*
* Get mac_for_pdev appropriately for both MCL & WIN,
* since MCL have multiple mon buf rings and WIN just
* has one mon buffer ring mapped per pdev, below API
* helps identify accurate buffer_ring for both cases
*
*/
mac_for_pdev =
dp_get_lmac_id_for_pdev_id(soc, id, pdev->pdev_id);
dp_rx_buffers_replenish(soc, mac_for_pdev,
dp_rxdma_get_mon_buf_ring(pdev,
mac_for_pdev),
dp_rx_get_mon_desc_pool(soc,
mac_for_pdev,
pdev->pdev_id),
num_entries, &desc_list, &tail);
}
}
#else
static
QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
{
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id)
{
return QDF_STATUS_SUCCESS;
}
static
void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id)
{
}
static QDF_STATUS
dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id)
{
return QDF_STATUS_SUCCESS;
}
void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev)
{}
#endif
/**
* dp_rx_pdev_mon_cmn_detach() - detach dp rx for monitor mode
* @pdev: core txrx pdev context
* @mac_id: mac_id for which deinit is to be done
*
* This function will free DP Rx resources for
* monitor mode
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
static QDF_STATUS
dp_rx_pdev_mon_cmn_detach(struct dp_pdev *pdev, int mac_id) {
struct dp_soc *soc = pdev->soc;
uint8_t pdev_id = pdev->pdev_id;
int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
dp_mon_link_desc_pool_cleanup(soc, lmac_id);
dp_rx_pdev_mon_status_detach(pdev, lmac_id);
dp_rx_pdev_mon_buf_detach(pdev, lmac_id);
return QDF_STATUS_SUCCESS;
}
/**
* dp_rx_pdev_mon_cmn_attach() - attach DP RX for monitor mode
* @pdev: core txrx pdev context
* @mac_id: mac_id for which init is to be done
*
* This function Will allocate dp rx resource and
* initialize resources for monitor mode.
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
static QDF_STATUS
dp_rx_pdev_mon_cmn_attach(struct dp_pdev *pdev, int mac_id) {
struct dp_soc *soc = pdev->soc;
uint8_t pdev_id = pdev->pdev_id;
int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
QDF_STATUS status;
status = dp_rx_pdev_mon_buf_attach(pdev, lmac_id);
mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
/* Allocate sw rx descriptor pool for monitor status ring */
status = dp_rx_pdev_mon_status_desc_pool_alloc(pdev, mac_for_pdev);
if (!QDF_IS_STATUS_SUCCESS(status)) {
dp_err("%s: dp_rx_pdev_mon_buf_attach() failed\n", __func__);
dp_err("%s: dp_rx_pdev_mon_status_desc_pool_alloc() failed",
__func__);
goto fail;
}
status = dp_rx_pdev_mon_status_attach(pdev, lmac_id);
if (!soc->wlan_cfg_ctx->rxdma1_enable)
return status;
/* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */
status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
if (!QDF_IS_STATUS_SUCCESS(status)) {
dp_err("%s: dp_rx_pdev_mon_status_attach() failed", __func__);
goto mon_buf_detach;
dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
__func__);
goto mon_status_dealloc;
}
status = dp_mon_link_desc_pool_setup(soc, lmac_id);
/* Allocate link descriptors for the monitor link descriptor ring */
status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
if (!QDF_IS_STATUS_SUCCESS(status)) {
dp_err("%s: dp_mon_link_desc_pool_setup() failed", __func__);
goto mon_status_detach;
dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
__func__);
goto mon_buf_dealloc;
}
return status;
mon_status_detach:
dp_rx_pdev_mon_status_detach(pdev, lmac_id);
mon_buf_detach:
dp_rx_pdev_mon_buf_detach(pdev, lmac_id);
mon_buf_dealloc:
dp_rx_pdev_mon_buf_desc_pool_free(pdev, mac_for_pdev);
mon_status_dealloc:
dp_rx_pdev_mon_status_desc_pool_free(pdev, mac_for_pdev);
fail:
return status;
}
/**
* dp_rx_pdev_mon_attach() - attach DP RX for monitor mode
* @pdev: core txrx pdev context
*
* This function will attach a DP RX for monitor mode instance into
* the main device (SOC) context. It will allocate DP Rx resources
* and initialize them.
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS
dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
QDF_STATUS status;
uint8_t pdev_id = pdev->pdev_id;
int mac_id;
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
"%s: pdev attach id=%d", __func__, pdev_id);
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
status = dp_rx_pdev_mon_cmn_attach(pdev, mac_id);
if (!QDF_IS_STATUS_SUCCESS(status)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_ERROR,
"%s: dp_rx_pdev_mon_cmn_attach(%d) failed\n",
__func__, mac_id);
goto fail;
}
}
pdev->mon_last_linkdesc_paddr = 0;
pdev->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
qdf_spinlock_create(&pdev->mon_lock);
/* Attach full monitor mode resources */
dp_full_mon_attach(pdev);
return QDF_STATUS_SUCCESS;
fail:
for (mac_id = mac_id - 1; mac_id >= 0; mac_id--)
dp_rx_pdev_mon_cmn_detach(pdev, mac_id);
return status;
}
QDF_STATUS
dp_mon_link_free(struct dp_pdev *pdev) {
static void
dp_rx_pdev_mon_cmn_buffers_free(struct dp_pdev *pdev, int mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
int mac_id, lmac_id;
int mac_for_pdev;
mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id, pdev_id);
dp_rx_pdev_mon_status_buffers_free(pdev, mac_for_pdev);
if (!soc->wlan_cfg_ctx->rxdma1_enable)
return;
dp_rx_pdev_mon_buf_buffers_free(pdev, mac_for_pdev);
}
QDF_STATUS
dp_rx_pdev_mon_desc_pool_alloc(struct dp_pdev *pdev)
{
QDF_STATUS status;
int mac_id, count;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
dp_mon_link_desc_pool_cleanup(soc, lmac_id);
}
status = dp_rx_pdev_mon_cmn_desc_pool_alloc(pdev, mac_id);
if (!QDF_IS_STATUS_SUCCESS(status)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_ERROR, "%s: %d failed\n",
__func__, mac_id);
return QDF_STATUS_SUCCESS;
for (count = 0; count < mac_id; count++)
dp_rx_pdev_mon_cmn_desc_pool_free(pdev, count);
return status;
}
}
return status;
}
/**
* dp_rx_pdev_mon_detach() - detach dp rx for monitor mode
* @pdev: core txrx pdev context
*
* This function will detach DP RX for monitor mode from
* the main device context and will free DP Rx resources for
* monitor mode
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS
dp_rx_pdev_mon_detach(struct dp_pdev *pdev) {
uint8_t pdev_id = pdev->pdev_id;
void
dp_rx_pdev_mon_desc_pool_init(struct dp_pdev *pdev)
{
int mac_id;
qdf_spinlock_destroy(&pdev->mon_lock);
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
dp_rx_pdev_mon_cmn_desc_pool_init(pdev, mac_id);
}
void
dp_rx_pdev_mon_desc_pool_deinit(struct dp_pdev *pdev)
{
int mac_id;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
dp_rx_pdev_mon_cmn_desc_pool_deinit(pdev, mac_id);
}
void dp_rx_pdev_mon_desc_pool_free(struct dp_pdev *pdev)
{
int mac_id;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
dp_rx_pdev_mon_cmn_desc_pool_free(pdev, mac_id);
}
void
dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev)
{
int mac_id;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
dp_rx_pdev_mon_cmn_buffers_free(pdev, mac_id);
}
QDF_STATUS
dp_rx_pdev_mon_buffers_alloc(struct dp_pdev *pdev)
{
int mac_id;
QDF_STATUS status;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc,
mac_id, pdev_id);
dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev);
dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev);
status = dp_rx_pdev_mon_cmn_buffers_alloc(pdev, mac_id);
if (!QDF_IS_STATUS_SUCCESS(status)) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_ERROR, "%s: %d failed\n",
__func__, mac_id);
return status;
}
}
/* Detach full monitor mode resources */
dp_full_mon_detach(pdev);
return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS
dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
return QDF_STATUS_SUCCESS;
return status;
}
QDF_STATUS
dp_rx_pdev_mon_detach(struct dp_pdev *pdev) {
return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_mon_link_free(struct dp_pdev *pdev) {
return QDF_STATUS_SUCCESS;
}
void dp_mon_buf_delayed_replenish(struct dp_pdev *pdev)
{}
#endif /* DISABLE_MON_CONFIG */


@@ -34,6 +34,16 @@
#include "dp_ratetable.h"
#endif
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail,
uint8_t owner);
static inline void
dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
struct hal_rx_ppdu_info *ppdu_info,
@@ -1903,6 +1913,7 @@ done:
return work_done;
}
/*
* dp_rx_mon_status_process() - Process monitor status ring and
* TLV in status ring.
@@ -1923,6 +1934,7 @@ dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
return work_done;
}
/**
* dp_mon_process() - Main monitor mode processing routine.
* This calls the monitor status ring processing, then monitor
@@ -1942,34 +1954,143 @@ dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
return dp_rx_mon_status_process(soc, mac_id, quota);
}
/**
* dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
* @pdev: core txrx pdev context
* @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
*
* This function will detach DP RX status ring from
* the main device context and will free DP Rx resources for
* status ring
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *mon_status_ring;
uint32_t num_entries;
struct rx_desc_pool *rx_desc_pool;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
soc_cfg_ctx = soc->wlan_cfg_ctx;
mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];
num_entries = mon_status_ring->num_entries;
rx_desc_pool = &soc->rx_desc_status[mac_id];
dp_debug("Mon RX Desc Pool[%d] entries=%u",
pdev_id, num_entries);
return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring,
rx_desc_pool, num_entries,
&desc_list, &tail,
HAL_RX_BUF_RBM_SW3_BM);
}
QDF_STATUS
dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *mon_status_ring;
uint32_t num_entries;
struct rx_desc_pool *rx_desc_pool;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
soc_cfg_ctx = soc->wlan_cfg_ctx;
mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];
num_entries = mon_status_ring->num_entries;
rx_desc_pool = &soc->rx_desc_status[mac_id];
dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);
return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}
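
Both this alloc and the matching dp_rx_pdev_mon_status_desc_pool_init()
below size the status pool as num_entries + 1, keeping one software
descriptor spare beyond the ring size.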
void
dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
uint32_t i;
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct dp_srng *mon_status_ring;
uint32_t num_entries;
struct rx_desc_pool *rx_desc_pool;
struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
soc_cfg_ctx = soc->wlan_cfg_ctx;
mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];
num_entries = mon_status_ring->num_entries;
rx_desc_pool = &soc->rx_desc_status[mac_id];
dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
pdev_id, num_entries);
rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);
qdf_nbuf_queue_init(&pdev->rx_status_q);
pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
qdf_mem_zero(&pdev->ppdu_info.rx_status,
sizeof(pdev->ppdu_info.rx_status));
qdf_mem_zero(&pdev->rx_mon_stats, sizeof(pdev->rx_mon_stats));
dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
&pdev->rx_mon_stats);
for (i = 0; i < MAX_MU_USERS; i++) {
qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
pdev->is_mpdu_hdr[i] = true;
}
qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list[MAX_MU_USERS]));
pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
}
void
dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id) {
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_status[mac_id];
if (rx_desc_pool->pool_size != 0) {
if (!dp_is_soc_reinit(soc))
dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
rx_desc_pool);
else
dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
return QDF_STATUS_SUCCESS;
dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);
dp_rx_desc_pool_deinit(soc, rx_desc_pool);
}
void
dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id) {
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_status[mac_id];
dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);
dp_rx_desc_pool_free(soc, rx_desc_pool);
}
void
dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_status[mac_id];
dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);
dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
/*
@@ -2132,80 +2253,3 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
return QDF_STATUS_SUCCESS;
}
/**
* dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
* @pdev: core txrx pdev context
* @ring_id: ring number
* This function will attach a DP RX monitor status ring into pdev
* and replenish monitor status ring with buffer.
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
struct dp_soc *soc = pdev->soc;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
struct dp_srng *mon_status_ring;
uint32_t num_entries;
uint32_t i;
struct rx_desc_pool *rx_desc_pool;
QDF_STATUS status;
mon_status_ring = &soc->rxdma_mon_status_ring[ring_id];
num_entries = mon_status_ring->num_entries;
rx_desc_pool = &soc->rx_desc_status[ring_id];
dp_info("Mon RX Status Pool[%d] entries=%d",
ring_id, num_entries);
if (!dp_is_soc_reinit(soc)) {
status = dp_rx_desc_pool_alloc(soc, num_entries + 1,
rx_desc_pool);
if (!QDF_IS_STATUS_SUCCESS(status))
return status;
}
dp_rx_desc_pool_init(soc, ring_id, num_entries + 1, rx_desc_pool);
rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id);
status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
mon_status_ring,
rx_desc_pool,
num_entries,
&desc_list, &tail,
HAL_RX_BUF_RBM_SW3_BM);
if (!QDF_IS_STATUS_SUCCESS(status))
return status;
qdf_nbuf_queue_init(&pdev->rx_status_q);
pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
qdf_mem_zero(&(pdev->ppdu_info.rx_status),
sizeof(pdev->ppdu_info.rx_status));
qdf_mem_zero(&pdev->rx_mon_stats,
sizeof(pdev->rx_mon_stats));
dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
&pdev->rx_mon_stats);
for (i = 0; i < MAX_MU_USERS; i++) {
qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
pdev->is_mpdu_hdr[i] = true;
}
qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list[MAX_MU_USERS]));
pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
return QDF_STATUS_SUCCESS;
}


@@ -4205,7 +4205,7 @@ QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
struct dp_soc *soc = pdev->soc;
@@ -4239,8 +4239,8 @@ QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
int num_desc)
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
int num_desc)
{
uint8_t i;
@@ -4249,7 +4249,17 @@ static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
}
return 0;
return QDF_STATUS_SUCCESS;
}
static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
int num_desc)
{
return QDF_STATUS_SUCCESS;
}
static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}
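
Under QCA_LL_TX_FLOW_CONTROL_V2 the init and deinit stages are deliberately
empty: as the comment above notes, pools are created dynamically by the
flow-control code, so only the static-pool build in the #else branch below
does real work in these two stages.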
static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
@@ -4260,105 +4270,127 @@ static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
int num_desc)
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
int num_desc)
{
uint8_t i;
uint8_t i, count;
/* Allocate software Tx descriptor pools */
for (i = 0; i < num_pool; i++) {
if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx Desc Pool alloc %d failed %pK",
__func__, i, soc);
return ENOMEM;
FL("Tx Desc Pool alloc %d failed %pK"),
i, soc);
goto fail;
}
}
return 0;
return QDF_STATUS_SUCCESS;
fail:
for (count = 0; count < i; count++)
dp_tx_desc_pool_free(soc, count);
return QDF_STATUS_E_NOMEM;
}
static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
int num_desc)
{
uint8_t i;
for (i = 0; i < num_pool; i++) {
if (dp_tx_desc_pool_init(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Tx Desc Pool init %d failed %pK"),
i, soc);
return QDF_STATUS_E_NOMEM;
}
}
return QDF_STATUS_SUCCESS;
}
static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
uint8_t i;
for (i = 0; i < num_pool; i++)
dp_tx_desc_pool_deinit(soc, i);
}
static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
uint8_t i;
for (i = 0; i < num_pool; i++) {
qdf_assert_always(!soc->tx_desc[i].num_allocated);
if (dp_tx_desc_pool_free(soc, i)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Desc Pool Free failed", __func__);
}
}
for (i = 0; i < num_pool; i++)
dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
* dp_tso_attach_wifi3() - TSO attach handler
* @txrx_soc: Opaque Dp handle
* dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
* @soc: core txrx main context
* @num_pool: number of pools
*
* Reserve TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
return dp_tso_soc_attach(txrx_soc);
dp_tx_tso_desc_pool_deinit(soc, num_pool);
dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}
/**
* dp_tso_detach_wifi3() - TSO Detach handler
* @txrx_soc: Opaque Dp handle
* dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
* @soc: core txrx main context
* @num_pool: number of pools
*
* Deallocate TSO descriptor buffers
*
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
return dp_tso_soc_detach(txrx_soc);
}
#else
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
return QDF_STATUS_SUCCESS;
dp_tx_tso_desc_pool_free(soc, num_pool);
dp_tx_tso_num_seg_pool_free(soc, num_pool);
}
static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
/**
* dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
* @soc: core txrx main context
*
* This function frees all tx related descriptors as below
* 1. Regular TX descriptors (static pools)
* 2. extension TX descriptors (used for ME, RAW, TSO etc...)
* 3. TSO descriptors
*
*/
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
return QDF_STATUS_SUCCESS;
}
#endif
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
uint8_t i;
uint8_t num_pool;
uint32_t num_desc;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
for (i = 0; i < num_pool; i++)
dp_tx_tso_desc_pool_free(soc, i);
dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
dp_tx_ext_desc_pool_free(soc, num_pool);
dp_tx_delete_static_pools(soc, num_pool);
}
dp_info("%s TSO Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);
/**
* dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
* @soc: core txrx main context
*
* This function de-initializes all tx related descriptors as below
* 1. Regular TX descriptors (static pools)
* 2. extension TX descriptors (used for ME, RAW, TSO etc...)
* 3. TSO descriptors
*
*/
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
uint8_t num_pool;
for (i = 0; i < num_pool; i++)
dp_tx_tso_num_seg_pool_free(soc, i);
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);
return QDF_STATUS_SUCCESS;
dp_tx_flow_control_deinit(soc);
dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
dp_tx_ext_desc_pool_deinit(soc, num_pool);
dp_tx_deinit_static_pools(soc, num_pool);
}
/**
@@ -4370,105 +4402,69 @@ QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
uint8_t num_pool,
uint16_t num_desc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
uint8_t i;
uint8_t num_pool;
uint32_t num_desc;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
dp_err("TSO Desc Pool alloc %d failed %pK",
i, soc);
return QDF_STATUS_E_FAILURE;
}
if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
return QDF_STATUS_E_FAILURE;
}
dp_info("%s TSO Desc Alloc %d, descs = %d",
__func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
dp_err("TSO Num of seg Pool alloc %d failed %pK",
i, soc);
return QDF_STATUS_E_FAILURE;
}
if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
dp_err("TSO Num of seg Pool alloc %d failed %pK",
num_pool, soc);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
}
/**
* dp_tx_soc_detach() - detach soc from dp tx
* @soc: core txrx main context
* dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
* @soc: DP soc handle
* @num_pool: Number of pools
* @num_desc: Number of descriptors
*
* This function will detach dp tx into main device context
* will free dp tx resource and initialize resources
* Initialize TSO descriptor pools
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
* Return: QDF_STATUS_E_FAILURE on failure or
* QDF_STATUS_SUCCESS on success
*/
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
uint8_t num_pool,
uint16_t num_desc)
{
uint8_t num_pool;
uint16_t num_desc;
uint16_t num_ext_desc;
uint8_t i;
QDF_STATUS status = QDF_STATUS_SUCCESS;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
dp_tx_flow_control_deinit(soc);
dp_tx_delete_static_pools(soc, num_pool);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Desc Pool Free num_pool = %d, descs = %d",
__func__, num_pool, num_desc);
for (i = 0; i < num_pool; i++) {
if (dp_tx_ext_desc_pool_free(soc, i)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Ext Desc Pool Free failed",
__func__);
return QDF_STATUS_E_RESOURCES;
}
if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
return QDF_STATUS_E_FAILURE;
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s MSDU Ext Desc Pool %d Free descs = %d",
__func__, num_pool, num_ext_desc);
status = dp_tso_detach_wifi3(soc);
if (status != QDF_STATUS_SUCCESS)
return status;
if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
dp_err("TSO Num of seg Pool alloc %d failed %pK",
num_pool, soc);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
}
/**
* dp_tx_soc_attach() - attach soc to dp tx
* dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
* @soc: core txrx main context
*
* This function will attach dp tx into main device context
* will allocate dp tx resource and initialize resources
* This function allocates memory for following descriptor pools
* 1. regular sw tx descriptor pools (static pools)
* 2. TX extension descriptor pools (ME, RAW, TSO etc...)
* 3. TSO descriptor pools
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
uint8_t i;
uint8_t num_pool;
uint32_t num_desc;
uint32_t num_ext_desc;
QDF_STATUS status = QDF_STATUS_SUCCESS;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
@@ -4480,68 +4476,117 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
if ((num_pool > MAX_TXDESC_POOLS) ||
(num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
goto fail;
goto fail1;
if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
goto fail;
goto fail1;
dp_tx_flow_control_init(soc);
if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
goto fail2;
/* Allocate extension tx descriptor pools */
for (i = 0; i < num_pool; i++) {
if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"MSDU Ext Desc Pool alloc %d failed %pK",
i, soc);
if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
return QDF_STATUS_SUCCESS;
goto fail;
}
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s MSDU Ext Desc Alloc %d, descs = %d",
__func__, num_pool, num_ext_desc);
status = dp_tso_attach_wifi3((void *)soc);
if (status != QDF_STATUS_SUCCESS)
goto fail;
/* Initialize descriptors in TCL Rings */
if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
for (i = 0; i < soc->num_tcl_data_rings; i++) {
hal_tx_init_data_ring(soc->hal_soc,
soc->tcl_data_ring[i].hal_srng);
}
if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
hal_tx_init_data_ring(soc->hal_soc,
soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
}
/*
* Initialize command/credit ring descriptor
* Command/CREDIT ring also used for sending DATA cmds
*/
hal_tx_init_cmd_credit_ring(soc->hal_soc,
soc->tcl_cmd_credit_ring.hal_srng);
/*
* todo - Add a runtime config option to enable this.
*/
/*
* Due to multiple issues on NPR EMU, enable it selectively
* only for NPR EMU, should be removed, once NPR platforms
* are stable.
*/
soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s HAL Tx init Success", __func__);
if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
goto fail3;
return QDF_STATUS_SUCCESS;
fail:
/* Detach will take care of freeing only allocated resources */
dp_tx_soc_detach(soc);
fail3:
dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
dp_tx_delete_static_pools(soc, num_pool);
fail1:
return QDF_STATUS_E_RESOURCES;
}
/**
* dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
* @soc: core txrx main context
*
* This function initializes the following TX descriptor pools
* 1. regular sw tx descriptor pools (static pools)
* 2. TX extension descriptor pools (ME, RAW, TSO etc...)
* 3. TSO descriptor pools
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
uint8_t num_pool;
uint32_t num_desc;
uint32_t num_ext_desc;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
if (dp_tx_init_static_pools(soc, num_pool, num_desc))
goto fail1;
if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
goto fail2;
if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
return QDF_STATUS_SUCCESS;
if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
goto fail3;
dp_tx_flow_control_init(soc);
soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
return QDF_STATUS_SUCCESS;
fail3:
dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
dp_tx_deinit_static_pools(soc, num_pool);
fail1:
return QDF_STATUS_E_RESOURCES;
}
/**
* dp_tso_soc_attach() - Allocate and initialize TSO descriptors
* @txrx_soc: dp soc handle
*
* Return: QDF_STATUS - QDF_STATUS_SUCCESS
* QDF_STATUS_E_FAILURE
*/
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
uint8_t num_pool;
uint32_t num_desc;
uint32_t num_ext_desc;
num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
return QDF_STATUS_E_FAILURE;
if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
return QDF_STATUS_E_FAILURE;
return QDF_STATUS_SUCCESS;
}
/**
* dp_tso_soc_detach() - de-initialize and free the TSO descriptors
* @txrx_soc: dp soc handle
*
* Return: QDF_STATUS - QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
struct dp_soc *soc = (struct dp_soc *)txrx_soc;
uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
return QDF_STATUS_SUCCESS;
}
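
At soc level the TX descriptor pools now follow the same split. A sketch of
how the four dp_soc_tx_desc_sw_pools_* routines defined above pair up
across the soc lifecycle; the dp_main.c call sites are assumed:

    /* dp_soc_attach path: memory only */
    if (dp_soc_tx_desc_sw_pools_alloc(soc) != QDF_STATUS_SUCCESS)
            return QDF_STATUS_E_RESOURCES;

    /* dp_soc_init path: build pools over that memory */
    if (dp_soc_tx_desc_sw_pools_init(soc) != QDF_STATUS_SUCCESS) {
            dp_soc_tx_desc_sw_pools_free(soc);    /* undo attach */
            return QDF_STATUS_E_RESOURCES;
    }

    /* ... datapath runs ... */

    /* dp_soc_deinit, then dp_soc_detach */
    dp_soc_tx_desc_sw_pools_deinit(soc);
    dp_soc_tx_desc_sw_pools_free(soc);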


@@ -171,9 +171,29 @@ void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
uint8_t num_pool,
uint16_t num_desc);
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
uint8_t num_pool,
uint16_t num_desc);
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
/**
* dp_tso_attach() - TSO Attach handler
@@ -197,8 +217,7 @@ QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
*/
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev);
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -65,7 +65,7 @@ dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
/**
* dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
* @soc Handle to DP SoC structure
* @num_pool Number of pools to allocate
* @pool_id pool to allocate
* @num_elem Number of descriptor elements per pool
*
* This function allocates memory for SW tx descriptors
@@ -91,42 +91,77 @@ dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
* Return: Status code. 0 for success.
*/
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
uint16_t num_elem)
{
uint32_t id, count, page_id, offset, pool_id_32;
uint16_t num_desc_per_page;
struct dp_tx_desc_s *tx_desc_elem;
uint32_t desc_size;
struct dp_tx_desc_pool_s *tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
struct dp_tx_desc_pool_s *tx_desc_pool;
desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
qdf_mem_multi_pages_alloc(soc->osdev,
&tx_desc_pool->desc_pages,
desc_size, num_elem,
0, true);
desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
tx_desc_pool->elem_size = desc_size;
if (!dp_is_soc_reinit(soc))
qdf_mem_multi_pages_alloc(soc->osdev,
&tx_desc_pool->desc_pages,
desc_size, num_elem,
0, true);
if (!tx_desc_pool->desc_pages.num_pages) {
dp_err("Multi page alloc fail, tx desc");
goto fail_exit;
return QDF_STATUS_E_NOMEM;
}
return QDF_STATUS_SUCCESS;
}
/**
* dp_tx_desc_pool_free() - Free the tx descriptor pools
* @soc: Handle to DP SoC structure
* @pool_id: pool to free
*
*/
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
struct dp_tx_desc_pool_s *tx_desc_pool;
num_desc_per_page =
tx_desc_pool->desc_pages.num_element_per_page;
tx_desc_pool->freelist = (struct dp_tx_desc_s *)
*tx_desc_pool->desc_pages.cacheable_pages;
tx_desc_pool = &((soc)->tx_desc[pool_id]);
if (tx_desc_pool->desc_pages.num_pages)
qdf_mem_multi_pages_free(soc->osdev,
&tx_desc_pool->desc_pages, 0, true);
}
/**
* dp_tx_desc_pool_init() - Initialize Tx Descriptor pool(s)
* @soc: Handle to DP SoC structure
* @pool_id: pool to allocate
* @num_elem: Number of descriptor elements per pool
*
* Return: QDF_STATUS_SUCCESS
* QDF_STATUS_E_FAULT
*/
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem)
{
uint32_t id, count, page_id, offset, pool_id_32;
struct dp_tx_desc_pool_s *tx_desc_pool;
struct dp_tx_desc_s *tx_desc_elem;
uint16_t num_desc_per_page;
uint32_t desc_size;
desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
if (qdf_mem_multi_page_link(soc->osdev,
&tx_desc_pool->desc_pages,
desc_size, num_elem, true)) {
dp_err("invalid tx desc allocation - overflow num link");
goto free_tx_desc;
dp_err("invalid tx desc allocation -overflow num link");
return QDF_STATUS_E_FAULT;
}
tx_desc_pool->freelist = (struct dp_tx_desc_s *)
*tx_desc_pool->desc_pages.cacheable_pages;
/* Set unique IDs for each Tx descriptor */
tx_desc_elem = tx_desc_pool->freelist;
count = 0;
pool_id_32 = (uint32_t)pool_id;
num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
while (tx_desc_elem) {
page_id = count / num_desc_per_page;
offset = count % num_desc_per_page;
@@ -139,365 +174,532 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
count++;
}
tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem);
TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
return QDF_STATUS_SUCCESS;
free_tx_desc:
qdf_mem_multi_pages_free(soc->osdev,
&tx_desc_pool->desc_pages, 0, true);
fail_exit:
return QDF_STATUS_E_FAULT;
}
/**
* dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
*
* dp_tx_desc_pool_deinit() - de-initialize Tx Descriptor pool(s)
* @soc Handle to DP SoC structure
* @pool_id
* @pool_id: pool to de-initialize
*
* Return:
*/
QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id)
{
struct dp_tx_desc_pool_s *tx_desc_pool =
&((soc)->tx_desc[(pool_id)]);
struct dp_tx_desc_pool_s *tx_desc_pool;
qdf_mem_multi_pages_free(soc->osdev,
&tx_desc_pool->desc_pages, 0, true);
TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
tx_desc_pool = &((soc)->tx_desc[(pool_id)]);
TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
return QDF_STATUS_SUCCESS;
TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
}
/**
* dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
* @soc Handle to DP SoC structure
* @pool_id
* dp_tx_ext_desc_pool_alloc() - allocate Tx extension Descriptor pool(s)
* @soc: Handle to DP SoC structure
* @num_pool: Number of pools to allocate
* @num_elem: Number of descriptor elements per pool
*
* Return: NONE
* Return - QDF_STATUS_SUCCESS
* QDF_STATUS_E_NOMEM
*/
QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;
	uint8_t pool_id, count;
	uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);

	/* Coherent tx extension descriptor alloc */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &dp_tx_ext_desc_pool->desc_pages,
					  elem_size, num_elem,
					  memctx, false);

		if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto fail_exit;
		}
	}

	/*
	 * Cacheable ext descriptor link alloc
	 * This structure is already large: a single element is 24 bytes,
	 * so 2K elements take 48KB; multi-page cacheable memory is used.
	 */
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &dp_tx_ext_desc_pool->desc_link_pages,
					  link_elem_size, num_elem,
					  0, true);

		if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "ext link desc page alloc fail");
			status = QDF_STATUS_E_NOMEM;
			goto free_ext_desc_page;
		}
	}

	return status;

free_ext_desc_page:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_link_pages,
					 0, true);
	}
	pool_id = num_pool;

fail_exit:
	for (count = 0; count < pool_id; count++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[count]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_pages,
					 memctx, false);
	}

	return status;
}
/**
 * dp_tx_ext_desc_pool_init() - initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to initialize
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
uint16_t num_elem)
{
uint32_t i;
struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
struct qdf_mem_dma_page_t *page_info;
struct qdf_mem_multi_page_t *pages;
struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
uint8_t pool_id;
QDF_STATUS status;
for (pool_id = 0; pool_id < num_pool; pool_id++) {
dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
soc->tx_ext_desc[pool_id].elem_size =
HAL_TX_EXT_DESC_WITH_META_DATA;
soc->tx_ext_desc[pool_id].link_elem_size =
sizeof(struct dp_tx_ext_desc_elem_s);
soc->tx_ext_desc[pool_id].elem_count = num_elem;
dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
*dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;
if (qdf_mem_multi_page_link(soc->osdev,
&dp_tx_ext_desc_pool->
desc_link_pages,
dp_tx_ext_desc_pool->link_elem_size,
dp_tx_ext_desc_pool->elem_count,
true)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"ext link desc page linking fail");
status = QDF_STATUS_E_FAULT;
goto fail;
}
/* Assign coherent memory pointer into linked free list */
pages = &dp_tx_ext_desc_pool->desc_pages;
page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
c_elem = dp_tx_ext_desc_pool->freelist;
p_elem = c_elem;
for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
if (!(i % pages->num_element_per_page)) {
/**
* First element for new page,
* should point next page
*/
if (!pages->dma_pages->page_v_addr_start) {
QDF_TRACE(QDF_MODULE_ID_DP,
QDF_TRACE_LEVEL_ERROR,
"link over flow");
status = QDF_STATUS_E_FAULT;
goto fail;
}
c_elem->vaddr =
(void *)page_info->page_v_addr_start;
c_elem->paddr = page_info->page_p_addr;
page_info++;
} else {
c_elem->vaddr = (void *)(p_elem->vaddr +
dp_tx_ext_desc_pool->elem_size);
c_elem->paddr = (p_elem->paddr +
dp_tx_ext_desc_pool->elem_size);
}
p_elem = c_elem;
c_elem = c_elem->next;
if (!c_elem)
break;
}
dp_tx_ext_desc_pool->num_free = num_elem;
qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
}
return QDF_STATUS_SUCCESS;
fail:
return status;
}
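The freelist walk above strides through each coherent page in elem_size steps and hops to the next dma_pages entry at every page boundary. An equivalent index-based formulation, shown as a sketch; field names follow qdf_mem_multi_page_t as used in this file, and this is not a drop-in helper:

static void ext_elem_addr_sketch(struct qdf_mem_multi_page_t *pages,
				 uint16_t elem_size, uint32_t i,
				 void **vaddr, qdf_dma_addr_t *paddr)
{
	uint32_t page = i / pages->num_element_per_page;
	uint32_t slot = i % pages->num_element_per_page;
	struct qdf_mem_dma_page_t *dma_page = &pages->dma_pages[page];

	/* Element i sits slot * elem_size bytes into its page */
	*vaddr = dma_page->page_v_addr_start + slot * elem_size;
	*paddr = dma_page->page_p_addr + slot * elem_size;
}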
/**
 * dp_tx_ext_desc_pool_free() - free Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 * Return: None
 */
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
	qdf_dma_context_t memctx = 0;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);

		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_link_pages,
					 0, true);

		qdf_mem_multi_pages_free(soc->osdev,
					 &dp_tx_ext_desc_pool->desc_pages,
					 memctx, false);
	}
}
/**
 * dp_tx_ext_desc_pool_deinit() - de-initialize Tx extension Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 * Return: None
 */
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint8_t pool_id;
	struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
		qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
	}
}
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_desc_pool_alloc() - allocate TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to allocate
 * @num_elem: Number of descriptor elements per pool
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		tso_desc_pool->num_free = 0;
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_desc_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);
		if (!tso_desc_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tx desc");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_desc_pool = &soc->tx_tso_desc[i];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_desc_pool->desc_pages,
					 0, true);
	}
	return QDF_STATUS_E_NOMEM;
}
/**
 * dp_tx_tso_desc_pool_free() - free TSO Descriptor pool(s)
 * @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to free
 *
 * Return: None
 */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_seg_pool_s *tso_desc_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_desc_pool = &soc->tx_tso_desc[pool_id];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_desc_pool->desc_pages,
					 0, true);
	}
}
/**
* dp_tx_tso_desc_pool_init() - initialize TSO Descriptor pool(s)
* @soc: Handle to DP SoC structure
* @num_pool: Number of pools to initialize
* @num_elem: Number of descriptor elements per pool
*
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
*/
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
uint16_t num_elem)
{
struct dp_tx_tso_seg_pool_s *tso_desc_pool;
uint32_t desc_size, pool_id;
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_desc_pool = &soc->tx_tso_desc[pool_id];
if (qdf_mem_multi_page_link(soc->osdev,
&tso_desc_pool->desc_pages,
desc_size,
num_elem, true)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"invalid tso desc allocation - overflow num link");
return QDF_STATUS_E_FAULT;
}
tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
*tso_desc_pool->desc_pages.cacheable_pages;
tso_desc_pool->num_free = num_elem;
TSO_DEBUG("Number of free descriptors: %u\n",
tso_desc_pool->num_free);
tso_desc_pool->pool_size = num_elem;
qdf_spinlock_create(&tso_desc_pool->lock);
}
return QDF_STATUS_SUCCESS;
}
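Once a pool is initialized, segments are popped from the freelist under the pool lock. A hedged sketch of a consumer, mirroring the freelist convention above rather than quoting dp_tx_tso_desc_alloc() verbatim:

static struct qdf_tso_seg_elem_t *
tso_seg_pop_sketch(struct dp_tx_tso_seg_pool_s *pool)
{
	struct qdf_tso_seg_elem_t *seg = NULL;

	qdf_spin_lock_bh(&pool->lock);
	if (pool->freelist) {
		/* Detach the head element and account for it */
		pool->num_free--;
		seg = pool->freelist;
		pool->freelist = pool->freelist->next;
	}
	qdf_spin_unlock_bh(&pool->lock);

	return seg;
}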
/**
* dp_tx_tso_desc_pool_deinit() - deinitialize TSO Descriptor pool(s)
* @soc: Handle to DP SoC structure
 * @num_pool: Number of pools to de-initialize
 *
 * Return: None
*/
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
struct dp_tx_tso_seg_pool_s *tso_desc_pool;
uint32_t pool_id;
for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_desc_pool = &soc->tx_tso_desc[pool_id];
qdf_spin_lock_bh(&tso_desc_pool->lock);
tso_desc_pool->freelist = NULL;
tso_desc_pool->num_free = 0;
tso_desc_pool->pool_size = 0;
qdf_spin_unlock_bh(&tso_desc_pool->lock);
qdf_spinlock_destroy(&tso_desc_pool->lock);
}
}
/**
 * dp_tx_tso_num_seg_pool_alloc() - Allocate descriptors that track the
 *                                  fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to allocate
 * @num_elem: total number of descriptors to be allocated
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t desc_size, pool_id, i;

	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		tso_num_seg_pool->num_free = 0;
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &tso_num_seg_pool->desc_pages,
					  desc_size,
					  num_elem, 0, true);

		if (!tso_num_seg_pool->desc_pages.num_pages) {
			dp_err("Multi page alloc fail, tso_num_seg_pool");
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < pool_id; i++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[i];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_num_seg_pool->desc_pages,
					 0, true);
	}
	return QDF_STATUS_E_NOMEM;
}
/**
 * dp_tx_tso_num_seg_pool_free() - free descriptors that track the
 *                                 fragments in each tso segment
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to free
 *
 * Return: None
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
	uint32_t pool_id;

	for (pool_id = 0; pool_id < num_pool; pool_id++) {
		tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
		qdf_mem_multi_pages_free(soc->osdev,
					 &tso_num_seg_pool->desc_pages,
					 0, true);
	}
}
/**
 * dp_tx_tso_num_seg_pool_init() - Initialize descriptors that track the
 *                                 fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to initialize
 * @num_elem: total number of descriptors to be initialized
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
*/
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
uint16_t num_elem)
{
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
uint32_t desc_size, pool_id;
desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
if (qdf_mem_multi_page_link(soc->osdev,
&tso_num_seg_pool->desc_pages,
desc_size,
num_elem, true)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"invalid tso desc allocation - overflow num link");
return QDF_STATUS_E_FAULT;
}
tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
*tso_num_seg_pool->desc_pages.cacheable_pages;
tso_num_seg_pool->num_free = num_elem;
tso_num_seg_pool->num_seg_pool_size = num_elem;
qdf_spinlock_create(&tso_num_seg_pool->lock);
}
return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_num_seg_pool_deinit() - de-initialize descriptors that track the
 *                                   fragments in each tso segment
 *
 * @soc: handle to dp soc structure
 * @num_pool: number of pools to de-initialize
 *
 * Return: None
*/
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
uint32_t pool_id;
for (pool_id = 0; pool_id < num_pool; pool_id++) {
tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
qdf_spin_lock_bh(&tso_num_seg_pool->lock);
tso_num_seg_pool->freelist = NULL;
tso_num_seg_pool->num_free = 0;
tso_num_seg_pool->num_seg_pool_size = 0;
qdf_spin_unlock_bh(&tso_num_seg_pool->lock);
qdf_spinlock_destroy(&tso_num_seg_pool->lock);
}
}
#else
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}

QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
#endif


@@ -78,17 +78,32 @@ do { \
#define MAX_POOL_BUFF_COUNT 10000
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
				 uint16_t num_elem);
QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
				uint16_t num_elem);
void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem);
QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem);
void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint16_t num_elem);
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint16_t num_elem);
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);

QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint16_t num_elem);
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint16_t num_elem);
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
void dp_tx_flow_control_init(struct dp_soc *);


@@ -274,6 +274,12 @@ struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
return NULL;
}
if (dp_tx_desc_pool_init(soc, flow_pool_id, flow_pool_size)) {
dp_tx_desc_pool_free(soc, flow_pool_id);
qdf_spin_unlock_bh(&pool->flow_pool_lock);
return NULL;
}
stop_threshold = wlan_cfg_get_tx_flow_stop_queue_th(soc->wlan_cfg_ctx);
start_threshold = stop_threshold +
wlan_cfg_get_tx_flow_start_queue_offset(soc->wlan_cfg_ctx);
@@ -338,6 +344,7 @@ int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
}
/* We have all the descriptors for the pool, we can delete the pool */
dp_tx_desc_pool_deinit(soc, pool->flow_pool_id);
dp_tx_desc_pool_free(soc, pool->flow_pool_id);
qdf_spin_unlock_bh(&pool->flow_pool_lock);
return 0;
@@ -529,8 +536,8 @@ static inline void dp_tx_desc_pool_dealloc(struct dp_soc *soc)
if (!tx_desc_pool->desc_pages.num_pages)
continue;
		dp_tx_desc_pool_deinit(soc, i);
		dp_tx_desc_pool_free(soc, i);
}
}


@@ -1123,15 +1123,6 @@ struct dp_soc {
struct dp_txrx_pool_stats pool_stats;
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
uint32_t wbm_idle_scatter_buf_size;
/* VDEVs on this SOC */
@@ -1328,11 +1319,11 @@ struct dp_soc {
uint8_t fisa_enable;
#endif
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
/* Full monitor mode support */
bool full_mon_mode;
/* SG supported for msdu continued packets from wbm release ring */
bool wbm_release_desc_rx_sg_support;
bool peer_map_attach_success;
};
#ifdef IPA_OFFLOAD
@@ -1547,12 +1538,6 @@ struct dp_pdev {
/* Stuck count on monitor destination ring MPDU process */
uint32_t mon_dest_ring_stuck_cnt;
bool pdev_deinit;
/* pdev status down or up required to handle dynamic hw


@@ -635,6 +635,18 @@ wlan_cfg_pdev_attach(struct cdp_ctrl_objmgr_psoc *psoc)
return wlan_cfg_ctx;
}
void wlan_cfg_set_mon_delayed_replenish_entries(
struct wlan_cfg_dp_soc_ctxt *cfg,
uint32_t val)
{
cfg->delayed_replenish_entries = val;
}
int wlan_cfg_get_mon_delayed_replenish_entries(struct wlan_cfg_dp_soc_ctxt *cfg)
{
return cfg->delayed_replenish_entries;
}
void wlan_cfg_pdev_detach(struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx)
{
if (wlan_cfg_ctx)
@@ -1226,6 +1238,17 @@ wlan_cfg_get_dp_caps(struct wlan_cfg_dp_soc_ctxt *cfg,
}
}
void wlan_cfg_set_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg,
bool val)
{
cfg->is_tso_desc_attach_defer = val;
}
bool wlan_cfg_is_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg)
{
return cfg->is_tso_desc_attach_defer;
}
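Example wiring of the two new soc-cfg knobs during configuration setup; the values below are placeholders, not recommended defaults:

static void cfg_knobs_sketch(struct wlan_cfg_dp_soc_ctxt *cfg)
{
	/* Defer TSO descriptor attach until a TSO flow actually needs it */
	wlan_cfg_set_tso_desc_attach_defer(cfg, true);

	/* Number of monitor ring buffers replenished at init (placeholder) */
	wlan_cfg_set_mon_delayed_replenish_entries(cfg, 256);

	if (wlan_cfg_is_tso_desc_attach_defer(cfg))
		qdf_info("TSO pools deferred, mon replenish entries %d",
			 wlan_cfg_get_mon_delayed_replenish_entries(cfg));
}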
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
* wlan_cfg_get_tx_flow_stop_queue_th() - Get flow control stop threshold


@@ -280,6 +280,8 @@ struct wlan_cfg_dp_soc_ctxt {
uint8_t *rx_toeplitz_hash_key;
uint8_t pktlog_buffer_size;
uint8_t is_rx_fisa_enabled;
bool is_tso_desc_attach_defer;
uint32_t delayed_replenish_entries;
};
/**
@@ -365,6 +367,28 @@ int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
int context, int mask);
int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
int context, int mask);
/**
 * wlan_cfg_set_mon_delayed_replenish_entries() - set number of buffers to
 *                                                replenish for monitor buffer
 *                                                ring at initialization
 * @wlan_cfg_ctx: Configuration Handle
 * @replenish_entries: number of entries to replenish at initialization
 *
 * Return: None
 */
void wlan_cfg_set_mon_delayed_replenish_entries(struct wlan_cfg_dp_soc_ctxt
						*wlan_cfg_ctx,
						uint32_t replenish_entries);
/**
 * wlan_cfg_get_mon_delayed_replenish_entries() - get number of buffers to
 *                                                replenish for monitor buffer
 *                                                ring at initialization
 * @wlan_cfg_ctx: Configuration Handle
 *
 * Return: delayed_replenish_entries
 */
int wlan_cfg_get_mon_delayed_replenish_entries(struct wlan_cfg_dp_soc_ctxt
					       *wlan_cfg_ctx);
/**
* wlan_cfg_get_num_contexts() - Number of interrupt contexts to be registered
* @wlan_cfg_ctx - Configuration Handle
@@ -1290,3 +1314,9 @@ void wlan_cfg_fill_interrupt_mask(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx,
*/
bool wlan_cfg_is_rx_fisa_enabled(struct wlan_cfg_dp_soc_ctxt *cfg);
#endif
void wlan_cfg_set_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg,
bool val);
bool wlan_cfg_is_tso_desc_attach_defer(struct wlan_cfg_dp_soc_ctxt *cfg);