qcacmn: HAL RX-TLV changes for beryllium

Add HAL RX TLV changes for WCN7850

Change-Id: Ie76c608ed57c6a4f8adac97e1efc7888d2036f52
CRs-Fixed: 2891049
Author: Rakesh Pillai
Date: 2021-02-16 23:33:36 -08:00
parent e135b3e106
commit 27d6b43bfb
10 changed files with 2294 additions and 299 deletions
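In short, the commit routes RXDMA ring-selection configuration through a new per-architecture callback (dp_rxdma_ring_sel_cfg in struct dp_arch_ops) and replaces the compile-time RX_PKT_TLVS_LEN with per-soc TLV sizes read from HAL, so Lithium and Beryllium targets can subscribe to different RX TLV sets. The standalone C sketch below illustrates only that dispatch pattern; the struct members, callback bodies, and printed values are simplified stand-ins, not the driver's real definitions.

#include <stdio.h>

typedef int QDF_STATUS;
#define QDF_STATUS_SUCCESS 0

struct dp_soc;

struct dp_arch_ops {
	/* mirrors the new member added to struct dp_arch_ops in this commit */
	QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
};

struct dp_soc {
	struct dp_arch_ops arch_ops;
	unsigned int rx_pkt_tlv_size;     /* filled from hal_rx_get_tlv_size() */
	unsigned int rx_mon_pkt_tlv_size;
};

/* Beryllium: mpdu_start/msdu_end carry what used to be separate TLVs */
static QDF_STATUS dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
{
	printf("be: subscribe mpdu_start/msdu_end/pkt_hdr, pkt tlv size %u\n",
	       soc->rx_pkt_tlv_size);
	return QDF_STATUS_SUCCESS;
}

/* Lithium: keeps the legacy mpdu/msdu start+end and attention subscription */
static QDF_STATUS dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
	printf("li: subscribe mpdu/msdu start+end and attention, pkt tlv size %u\n",
	       soc->rx_pkt_tlv_size);
	return QDF_STATUS_SUCCESS;
}

int main(void)
{
	struct dp_soc soc = { .rx_pkt_tlv_size = 128, .rx_mon_pkt_tlv_size = 256 };

	/* dp_initialize_arch_ops_li()/_be() pick the callback per target ... */
	soc.arch_ops.dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
	soc.arch_ops.dp_rxdma_ring_sel_cfg(&soc);

	/* ... and the attach path just dispatches through the function pointer */
	soc.arch_ops.dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
	return soc.arch_ops.dp_rxdma_ring_sel_cfg(&soc);
}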

View File

@@ -15,6 +15,9 @@
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_be.h" #include "dp_be.h"
#include "dp_be_tx.h" #include "dp_be_tx.h"
#include "dp_be_rx.h" #include "dp_be_rx.h"
@@ -96,6 +99,117 @@ qdf_size_t dp_get_soc_context_size_be(void)
return sizeof(struct dp_soc_be);
}
/**
* dp_rxdma_ring_sel_cfg_be() - Setup RXDMA ring config
* @soc: Common DP soc handle
*
* Return: QDF_STATUS
*/
static QDF_STATUS
dp_rxdma_ring_sel_cfg_be(struct dp_soc *soc)
{
int i;
int mac_id;
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
struct dp_srng *rx_mac_srng;
QDF_STATUS status = QDF_STATUS_SUCCESS;
/*
* In Beryllium chipset msdu_start, mpdu_end
* and rx_attn are part of msdu_end/mpdu_start
*/
htt_tlv_filter.msdu_start = 0;
htt_tlv_filter.mpdu_end = 0;
htt_tlv_filter.attention = 0;
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.packet = 1;
htt_tlv_filter.packet_header = 1;
htt_tlv_filter.ppdu_start = 0;
htt_tlv_filter.ppdu_end = 0;
htt_tlv_filter.ppdu_end_user_stats = 0;
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
htt_tlv_filter.ppdu_end_status_done = 0;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_mo = 0;
htt_tlv_filter.fp_mgmt_filter = 0;
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
FILTER_DATA_MCAST |
FILTER_DATA_DATA);
htt_tlv_filter.mo_mgmt_filter = 0;
htt_tlv_filter.mo_ctrl_filter = 0;
htt_tlv_filter.mo_data_filter = 0;
htt_tlv_filter.md_data_filter = 0;
htt_tlv_filter.offset_valid = true;
/* Not subscribing to mpdu_end, msdu_start and rx_attn */
htt_tlv_filter.rx_mpdu_end_offset = 0;
htt_tlv_filter.rx_msdu_start_offset = 0;
htt_tlv_filter.rx_attn_offset = 0;
htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
htt_tlv_filter.rx_header_offset =
hal_rx_pkt_tlv_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_start_offset =
hal_rx_mpdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_end_offset =
hal_rx_msdu_end_offset_get(soc->hal_soc);
dp_info("TLV subscription\n"
"msdu_start %d, mpdu_end %d, attention %d"
"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n"
"TLV offsets\n"
"msdu_start %d, mpdu_end %d, attention %d"
"mpdu_start %d, msdu_end %d, pkt_hdr %d, pkt %d\n",
htt_tlv_filter.msdu_start,
htt_tlv_filter.mpdu_end,
htt_tlv_filter.attention,
htt_tlv_filter.mpdu_start,
htt_tlv_filter.msdu_end,
htt_tlv_filter.packet_header,
htt_tlv_filter.packet,
htt_tlv_filter.rx_msdu_start_offset,
htt_tlv_filter.rx_mpdu_end_offset,
htt_tlv_filter.rx_attn_offset,
htt_tlv_filter.rx_mpdu_start_offset,
htt_tlv_filter.rx_msdu_end_offset,
htt_tlv_filter.rx_header_offset,
htt_tlv_filter.rx_packet_offset);
for (i = 0; i < MAX_PDEV_CNT; i++) {
struct dp_pdev *pdev = soc->pdev_list[i];
if (!pdev)
continue;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev =
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
/*
* Obtain lmac id from pdev to access the LMAC ring
* in soc context
*/
int lmac_id =
dp_get_lmac_id_for_pdev_id(soc, mac_id,
pdev->pdev_id);
rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
rx_mac_srng->hal_srng,
RXDMA_BUF, RX_DATA_BUFFER_SIZE,
&htt_tlv_filter);
}
}
return status;
}
void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
{
arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_be;
@@ -110,4 +224,6 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
arch_ops->txrx_vdev_detach = dp_vdev_detach_be;
arch_ops->tx_comp_get_params_from_hal_desc =
dp_tx_comp_get_params_from_hal_desc_be;
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_be;
}

View File

@@ -421,6 +421,20 @@ static inline void dp_wds_ext_peer_init(struct dp_peer *peer)
}
#endif /* QCA_SUPPORT_WDS_EXTENDED */
#ifdef QCA_HOST2FW_RXBUF_RING
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
return &pdev->rx_mac_buf_ring[lmac_id];
}
#else
static inline
struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
return &pdev->soc->rx_refill_buf_ring[lmac_id];
}
#endif
/**
* The lmac ID for a particular channel band is fixed.
* 2.4GHz band uses lmac_id = 1
@@ -2440,7 +2454,7 @@ static inline uint32_t dp_history_get_next_index(qdf_atomic_t *curr_idx,
*
* Return: None
*/
-void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding);
+void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding);
/**
* dp_soc_is_full_mon_enable () - Return if full monitor mode is enabled

View File

@@ -5461,18 +5461,6 @@ static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
}
#endif
#ifdef QCA_HOST2FW_RXBUF_RING
static struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
return &pdev->rx_mac_buf_ring[lmac_id];
}
#else
static struct dp_srng *dp_get_rxdma_ring(struct dp_pdev *pdev, int lmac_id)
{
return &pdev->soc->rx_refill_buf_ring[lmac_id];
}
#endif
/*
* dp_rxdma_ring_config() - configure the RX DMA rings
*
@@ -5646,169 +5634,6 @@ static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
}
#endif
#ifdef NO_RX_PKT_HDR_TLV
static QDF_STATUS
dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
{
int i;
int mac_id;
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
QDF_STATUS status = QDF_STATUS_SUCCESS;
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_start = 1;
htt_tlv_filter.mpdu_end = 1;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.attention = 1;
htt_tlv_filter.packet = 1;
htt_tlv_filter.packet_header = 0;
htt_tlv_filter.ppdu_start = 0;
htt_tlv_filter.ppdu_end = 0;
htt_tlv_filter.ppdu_end_user_stats = 0;
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
htt_tlv_filter.ppdu_end_status_done = 0;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_mo = 0;
htt_tlv_filter.fp_mgmt_filter = 0;
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
FILTER_DATA_MCAST |
FILTER_DATA_DATA);
htt_tlv_filter.mo_mgmt_filter = 0;
htt_tlv_filter.mo_ctrl_filter = 0;
htt_tlv_filter.mo_data_filter = 0;
htt_tlv_filter.md_data_filter = 0;
htt_tlv_filter.offset_valid = true;
htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
/*Not subscribing rx_pkt_header*/
htt_tlv_filter.rx_header_offset = 0;
htt_tlv_filter.rx_mpdu_start_offset =
hal_rx_mpdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_end_offset =
hal_rx_mpdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_start_offset =
hal_rx_msdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_end_offset =
hal_rx_msdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_attn_offset =
hal_rx_attn_offset_get(soc->hal_soc);
for (i = 0; i < MAX_PDEV_CNT; i++) {
struct dp_pdev *pdev = soc->pdev_list[i];
if (!pdev)
continue;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev =
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
/*
* Obtain lmac id from pdev to access the LMAC ring
* in soc context
*/
int lmac_id =
dp_get_lmac_id_for_pdev_id(soc, mac_id,
pdev->pdev_id);
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
soc->rx_refill_buf_ring[lmac_id].
hal_srng,
RXDMA_BUF, RX_DATA_BUFFER_SIZE,
&htt_tlv_filter);
}
}
return status;
}
#else
static QDF_STATUS
dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
{
int i;
int mac_id;
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
struct dp_srng *rx_mac_srng;
QDF_STATUS status = QDF_STATUS_SUCCESS;
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_start = 1;
htt_tlv_filter.mpdu_end = 1;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.attention = 1;
htt_tlv_filter.packet = 1;
htt_tlv_filter.packet_header = 1;
htt_tlv_filter.ppdu_start = 0;
htt_tlv_filter.ppdu_end = 0;
htt_tlv_filter.ppdu_end_user_stats = 0;
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
htt_tlv_filter.ppdu_end_status_done = 0;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_mo = 0;
htt_tlv_filter.fp_mgmt_filter = 0;
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
FILTER_DATA_MCAST |
FILTER_DATA_DATA);
htt_tlv_filter.mo_mgmt_filter = 0;
htt_tlv_filter.mo_ctrl_filter = 0;
htt_tlv_filter.mo_data_filter = 0;
htt_tlv_filter.md_data_filter = 0;
htt_tlv_filter.offset_valid = true;
htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
htt_tlv_filter.rx_header_offset =
hal_rx_pkt_tlv_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_start_offset =
hal_rx_mpdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_end_offset =
hal_rx_mpdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_start_offset =
hal_rx_msdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_end_offset =
hal_rx_msdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_attn_offset =
hal_rx_attn_offset_get(soc->hal_soc);
for (i = 0; i < MAX_PDEV_CNT; i++) {
struct dp_pdev *pdev = soc->pdev_list[i];
if (!pdev)
continue;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev =
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
/*
* Obtain lmac id from pdev to access the LMAC ring
* in soc context
*/
int lmac_id =
dp_get_lmac_id_for_pdev_id(soc, mac_id,
pdev->pdev_id);
rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
rx_mac_srng->hal_srng,
RXDMA_BUF, RX_DATA_BUFFER_SIZE,
&htt_tlv_filter);
}
}
return status;
}
#endif
/*
* dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
*
@@ -5927,7 +5752,7 @@ dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
return status;
}
-status = dp_rxdma_ring_sel_cfg(soc);
+status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
if (status != QDF_STATUS_SUCCESS) {
dp_err("Failed to send htt ring config message to target");
return status;
@@ -12906,6 +12731,8 @@ dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
soc->ctrl_psoc = ctrl_psoc;
soc->osdev = qdf_osdev;
soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
&soc->rx_mon_pkt_tlv_size);
dp_configure_arch_ops(soc);

View File

@@ -775,7 +775,7 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
center_chan_freq);
}
rx_info->rs_channel = primary_chan_num;
-pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
+pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
@@ -937,9 +937,7 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
struct ieee80211_frame *wh;
qdf_nbuf_t curr_nbuf, next_nbuf;
uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
-uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
-rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
+uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
dp_rx_debug("%pK: Drop decapped frames", soc);
@@ -995,7 +993,7 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
out:
msg.wh = wh;
-qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
+qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
msg.nbuf = mpdu;
msg.vdev_id = vdev->vdev_id;
@@ -1058,7 +1056,7 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
struct dp_vdev *vdev = NULL;
struct ieee80211_frame *wh;
uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
-uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
+uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
wh = (struct ieee80211_frame *)rx_pkt_hdr;
@@ -1138,23 +1136,24 @@ void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
/**
* dp_rx_print_offload_info() - Print offload info from RX TLV
* @soc: dp soc handle
-* @rx_tlv: RX TLV for which offload information is to be printed
+* @msdu: MSDU for which the offload info is to be printed
*
* Return: None
*/
-static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
+static void dp_rx_print_offload_info(struct dp_soc *soc,
+qdf_nbuf_t msdu)
{
dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
-dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
-dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
-dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
-rx_tlv));
-dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
-dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
-dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
-dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
-dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
-dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
+dp_verbose_debug("lro_eligible 0x%x",
+QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
+dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
+dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
+dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
+dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
+dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
+dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
+dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
+dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
dp_verbose_debug("---------------------------------------------------------");
}
@@ -1171,38 +1170,31 @@ static
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
+struct hal_offload_info offload_info;
if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
return;
/* Filling up RX offload info only for TCP packets */
-if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
+if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
return;
*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
-QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
-HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
-QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
-HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
+QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
+QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
rx_tlv);
-QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
-HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
-QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
-HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
-QDF_NBUF_CB_RX_TCP_WIN(msdu) =
-HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
-QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
-HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
-QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
-HAL_RX_TLV_GET_IPV6(rx_tlv);
-QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
-HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
-QDF_NBUF_CB_RX_FLOW_ID(msdu) =
-HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
+QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
+QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
+QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
+QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
+QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
+QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
+QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
-dp_rx_print_offload_info(soc, rx_tlv);
+dp_rx_print_offload_info(soc, msdu);
}
#else
static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
@@ -1214,24 +1206,26 @@ static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
/**
* dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
*
+* @soc: DP soc handle
* @nbuf: pointer to msdu.
* @mpdu_len: mpdu length
*
* Return: returns true if nbuf is last msdu of mpdu else retuns false.
*/
-static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
+static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
+qdf_nbuf_t nbuf, uint16_t *mpdu_len)
{
bool last_nbuf;
-if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
+if (*mpdu_len > (RX_DATA_BUFFER_SIZE - soc->rx_pkt_tlv_size)) {
qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
last_nbuf = false;
} else {
-qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
+qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + soc->rx_pkt_tlv_size));
last_nbuf = true;
}
-*mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
+*mpdu_len -= (RX_DATA_BUFFER_SIZE - soc->rx_pkt_tlv_size);
return last_nbuf;
}
@@ -1267,8 +1261,8 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
*/
if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
qdf_nbuf_is_rx_chfrag_end(nbuf)) {
-qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
-qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
+qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
+qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
return nbuf;
}
@@ -1291,7 +1285,7 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
* nbufs will form the frag_list of the parent nbuf.
*/
qdf_nbuf_set_rx_chfrag_start(parent, 1);
-last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
+last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len);
/*
* HW issue: MSDU cont bit is set but reported MPDU length can fit
@@ -1301,7 +1295,7 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
*/
if (last_nbuf) {
DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
-qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
+qdf_nbuf_pull_head(parent, soc->rx_pkt_tlv_size);
return parent;
}
@@ -1311,8 +1305,8 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
* till we hit the last_nbuf of the list.
*/
do {
-last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
-qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
+last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len);
+qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
frag_list_len += qdf_nbuf_len(nbuf);
if (last_nbuf) {
@@ -1328,7 +1322,7 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
parent->next = next;
-qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
+qdf_nbuf_pull_head(parent, soc->rx_pkt_tlv_size);
return parent;
}
@@ -1759,8 +1753,13 @@ static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
uint8_t *rx_tlv_hdr)
{
qdf_nbuf_rx_cksum_t cksum = {0};
-bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
-bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
+//TODO - Move this to ring desc api
+//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
+//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
+uint32_t ip_csum_err, tcp_udp_csum_er;
+hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
+&tcp_udp_csum_er);
if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
@@ -1848,18 +1847,22 @@ static void dp_rx_msdu_stats_update(struct dp_soc *soc,
if (!soc->process_rx_status)
return;
-is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
+/*
+* TODO - For WCN7850 this field is present in ring_desc
+* Try to use ring desc instead of tlv.
+*/
+is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
-sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
-mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
+sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
+mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
tid = qdf_nbuf_get_tid_val(nbuf);
-bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
+bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
rx_tlv_hdr);
nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
-pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
+pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
@@ -1877,9 +1880,9 @@ static void dp_rx_msdu_stats_update(struct dp_soc *soc,
DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
DP_STATS_INCC(peer, rx.err.mic_err, 1,
-hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
+hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr));
DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
-hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
+hal_rx_tlv_decrypt_err_get(soc->hal_soc, rx_tlv_hdr));
DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
@@ -1906,7 +1909,7 @@ static void dp_rx_msdu_stats_update(struct dp_soc *soc,
((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
if ((soc->process_rx_status) &&
-hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
+hal_rx_tlv_first_mpdu_get(soc->hal_soc, rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
if (!vdev->pdev)
return;
@@ -2076,13 +2079,11 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
+pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
qdf_nbuf_set_pktlen(nbuf, pkt_len);
-qdf_nbuf_pull_head(nbuf,
-RX_PKT_TLVS_LEN +
-l2_hdr_offset);
+qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
if (dp_rx_is_special_frame(nbuf, frame_mask)) {
qdf_nbuf_set_exc_frame(nbuf, 1);
@@ -2151,10 +2152,10 @@ uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef WLAN_SUPPORT_RX_FISA
-void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
+void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
-qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
+qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
/**
@@ -2170,9 +2171,9 @@ void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
-void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
+void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
-qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
+qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
static inline
@@ -2223,7 +2224,6 @@ dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
hal_ring_desc_t ring_desc)
{
struct dp_buf_info_record *record;
-uint8_t rbm;
struct hal_buf_info hbi;
uint32_t idx;
@@ -2231,7 +2231,10 @@ dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
return;
hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
rbm = hal_rx_ret_buf_manager_get(ring_desc);
/* buffer_addr_info is the first element of ring_desc */
hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
&hbi);
idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
DP_RX_HIST_MAX);
@@ -2242,7 +2245,7 @@ dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
record->timestamp = qdf_get_log_timestamp();
record->hbi.paddr = hbi.paddr;
record->hbi.sw_cookie = hbi.sw_cookie;
-record->hbi.rbm = rbm;
+record->hbi.rbm = hbi.rbm;
}
#else
static inline void
@@ -2308,12 +2311,12 @@ void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
pkt_len = msdu_len + msdu_metadata.l3_hdr_pad +
-RX_PKT_TLVS_LEN;
+soc->rx_pkt_tlv_size;
l2_hdr_offset =
hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
qdf_nbuf_set_pktlen(nbuf, pkt_len);
-dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
+dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, nbuf,
HTT_INVALID_VDEV, is_offload, 0);
@@ -3333,7 +3336,7 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
__func__,
RX_DESC_REPLENISHED);
-hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
+hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, paddr,
desc_list->rx_desc.cookie,
rx_desc_pool->owner);
dp_ipa_handle_rx_buf_smmu_mapping(
@@ -3594,7 +3597,7 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
skip_len = l2_hdr_offset;
} else {
msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
-skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN;
+skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
}
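As a minimal illustration of the RX_PKT_TLVS_LEN to soc->rx_pkt_tlv_size conversion seen throughout dp_rx.c above, the sketch below mimics dp_rx_skip_tlvs() with hypothetical fake_soc/fake_nbuf types and an assumed 128-byte TLV area; it only demonstrates the head-trim arithmetic, not the real qdf_nbuf API.

#include <assert.h>

struct fake_soc { unsigned int rx_pkt_tlv_size; };     /* per-soc, from HAL */
struct fake_nbuf { unsigned char *data; unsigned int len; };

/* stand-in for qdf_nbuf_pull_head(): drop bytes from the head of the buffer */
static void nbuf_pull_head(struct fake_nbuf *nbuf, unsigned int bytes)
{
	nbuf->data += bytes;
	nbuf->len -= bytes;
}

/* mirrors dp_rx_skip_tlvs(soc, nbuf, l3_padding) after this change */
static void rx_skip_tlvs(struct fake_soc *soc, struct fake_nbuf *nbuf,
			 unsigned int l3_padding)
{
	nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}

int main(void)
{
	unsigned char buf[512];
	struct fake_soc soc = { .rx_pkt_tlv_size = 128 };  /* assumed value */
	struct fake_nbuf nbuf = { .data = buf, .len = sizeof(buf) };

	rx_skip_tlvs(&soc, &nbuf, 2);        /* 2-byte L3 header padding */
	assert(nbuf.data == buf + 130);      /* payload starts after the TLV area */
	assert(nbuf.len == 512 - 130);
	return 0;
}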

View File

@@ -1532,6 +1532,7 @@ struct dp_arch_ops {
struct dp_vdev *vdev);
QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
struct dp_vdev *vdev);
QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
/* TX RX Arch Ops */
QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
@@ -2005,6 +2006,11 @@ struct dp_soc {
#endif
/* BM id for first WBM2SW ring */
uint32_t wbm_sw0_bm_id;
/* rx monitor pkt tlv size */
uint16_t rx_mon_pkt_tlv_size;
/* rx pkt tlv size */
uint16_t rx_pkt_tlv_size;
};
#ifdef IPA_OFFLOAD

View File

@@ -17,6 +17,8 @@
*/
#include "dp_types.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include "dp_li.h" #include "dp_li.h"
#include "dp_li_tx.h" #include "dp_li_tx.h"
#include "dp_li_rx.h" #include "dp_li_rx.h"
@@ -74,6 +76,175 @@ qdf_size_t dp_get_soc_context_size_li(void)
return sizeof(struct dp_soc);
}
#ifdef NO_RX_PKT_HDR_TLV
/**
* dp_rxdma_ring_sel_cfg_li() - Setup RXDMA ring config
* @soc: Common DP soc handle
*
* Return: QDF_STATUS
*/
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
int i;
int mac_id;
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
QDF_STATUS status = QDF_STATUS_SUCCESS;
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_start = 1;
htt_tlv_filter.mpdu_end = 1;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.attention = 1;
htt_tlv_filter.packet = 1;
htt_tlv_filter.packet_header = 0;
htt_tlv_filter.ppdu_start = 0;
htt_tlv_filter.ppdu_end = 0;
htt_tlv_filter.ppdu_end_user_stats = 0;
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
htt_tlv_filter.ppdu_end_status_done = 0;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_mo = 0;
htt_tlv_filter.fp_mgmt_filter = 0;
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
FILTER_DATA_MCAST |
FILTER_DATA_DATA);
htt_tlv_filter.mo_mgmt_filter = 0;
htt_tlv_filter.mo_ctrl_filter = 0;
htt_tlv_filter.mo_data_filter = 0;
htt_tlv_filter.md_data_filter = 0;
htt_tlv_filter.offset_valid = true;
htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
/*Not subscribing rx_pkt_header*/
htt_tlv_filter.rx_header_offset = 0;
htt_tlv_filter.rx_mpdu_start_offset =
hal_rx_mpdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_end_offset =
hal_rx_mpdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_start_offset =
hal_rx_msdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_end_offset =
hal_rx_msdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_attn_offset =
hal_rx_attn_offset_get(soc->hal_soc);
for (i = 0; i < MAX_PDEV_CNT; i++) {
struct dp_pdev *pdev = soc->pdev_list[i];
if (!pdev)
continue;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev =
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
/*
* Obtain lmac id from pdev to access the LMAC ring
* in soc context
*/
int lmac_id =
dp_get_lmac_id_for_pdev_id(soc, mac_id,
pdev->pdev_id);
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
soc->rx_refill_buf_ring[lmac_id].
hal_srng,
RXDMA_BUF, RX_DATA_BUFFER_SIZE,
&htt_tlv_filter);
}
}
return status;
}
#else
static QDF_STATUS
dp_rxdma_ring_sel_cfg_li(struct dp_soc *soc)
{
int i;
int mac_id;
struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
struct dp_srng *rx_mac_srng;
QDF_STATUS status = QDF_STATUS_SUCCESS;
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_start = 1;
htt_tlv_filter.mpdu_end = 1;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.attention = 1;
htt_tlv_filter.packet = 1;
htt_tlv_filter.packet_header = 1;
htt_tlv_filter.ppdu_start = 0;
htt_tlv_filter.ppdu_end = 0;
htt_tlv_filter.ppdu_end_user_stats = 0;
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
htt_tlv_filter.ppdu_end_status_done = 0;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_mo = 0;
htt_tlv_filter.fp_mgmt_filter = 0;
htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
FILTER_DATA_MCAST |
FILTER_DATA_DATA);
htt_tlv_filter.mo_mgmt_filter = 0;
htt_tlv_filter.mo_ctrl_filter = 0;
htt_tlv_filter.mo_data_filter = 0;
htt_tlv_filter.md_data_filter = 0;
htt_tlv_filter.offset_valid = true;
htt_tlv_filter.rx_packet_offset = soc->rx_pkt_tlv_size;
htt_tlv_filter.rx_header_offset =
hal_rx_pkt_tlv_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_start_offset =
hal_rx_mpdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_mpdu_end_offset =
hal_rx_mpdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_start_offset =
hal_rx_msdu_start_offset_get(soc->hal_soc);
htt_tlv_filter.rx_msdu_end_offset =
hal_rx_msdu_end_offset_get(soc->hal_soc);
htt_tlv_filter.rx_attn_offset =
hal_rx_attn_offset_get(soc->hal_soc);
for (i = 0; i < MAX_PDEV_CNT; i++) {
struct dp_pdev *pdev = soc->pdev_list[i];
if (!pdev)
continue;
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
int mac_for_pdev =
dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
/*
* Obtain lmac id from pdev to access the LMAC ring
* in soc context
*/
int lmac_id =
dp_get_lmac_id_for_pdev_id(soc, mac_id,
pdev->pdev_id);
rx_mac_srng = dp_get_rxdma_ring(pdev, lmac_id);
htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
rx_mac_srng->hal_srng,
RXDMA_BUF, RX_DATA_BUFFER_SIZE,
&htt_tlv_filter);
}
}
return status;
}
#endif
void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
{
arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_li;
@@ -87,4 +258,5 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
arch_ops->tx_comp_get_params_from_hal_desc =
dp_tx_comp_get_params_from_hal_desc_li;
arch_ops->dp_rx_process = dp_rx_process_li;
arch_ops->dp_rxdma_ring_sel_cfg = dp_rxdma_ring_sel_cfg_li;
}

View File

@@ -20,6 +20,8 @@
#include "dp_tx.h" #include "dp_tx.h"
#include "dp_li_tx.h" #include "dp_li_tx.h"
#include "dp_tx_desc.h" #include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_li_api.h>
#include <hal_li_tx.h>

View File

@@ -1452,14 +1452,14 @@ hal_rx_status_get_tlv_info_generic_be(void *rx_tlv_hdr, void *ppduinfo,
case WIFIRX_MPDU_START_E:
{
uint8_t *rx_mpdu_start = (uint8_t *)rx_tlv;
-uint32_t ppdu_id = HAL_RX_GET_PPDU_ID(rx_mpdu_start);
+uint32_t ppdu_id = HAL_RX_GET_PPDU_ID(rx_tlv);
uint8_t filter_category = 0;
ppdu_info->nac_info.fc_valid =
-HAL_RX_GET_FC_VALID(rx_mpdu_start);
+HAL_RX_GET_FC_VALID(rx_tlv);
ppdu_info->nac_info.to_ds_flag =
-HAL_RX_GET_TO_DS_FLAG(rx_mpdu_start);
+HAL_RX_GET_TO_DS_FLAG(rx_tlv);
ppdu_info->nac_info.frame_control =
HAL_RX_GET(rx_mpdu_start,
@@ -1467,7 +1467,7 @@ hal_rx_status_get_tlv_info_generic_be(void *rx_tlv_hdr, void *ppduinfo,
MPDU_FRAME_CONTROL_FIELD);
ppdu_info->sw_frame_group_id =
-HAL_RX_GET_SW_FRAME_GROUP_ID(rx_mpdu_start);
+HAL_RX_GET_SW_FRAME_GROUP_ID(rx_tlv);
if (ppdu_info->sw_frame_group_id ==
HAL_MPDU_SW_FRAME_GROUP_NULL_DATA) {
@@ -1481,7 +1481,7 @@ hal_rx_status_get_tlv_info_generic_be(void *rx_tlv_hdr, void *ppduinfo,
ppdu_info);
ppdu_info->nac_info.mac_addr2_valid =
-HAL_RX_GET_MAC_ADDR2_VALID(rx_mpdu_start);
+HAL_RX_TLV_MPDU_MAC_ADDR_AD2_VALID_GET(rx_tlv);
*(uint16_t *)&ppdu_info->nac_info.mac_addr2[0] =
HAL_RX_GET(rx_mpdu_start,
@@ -1505,7 +1505,7 @@ hal_rx_status_get_tlv_info_generic_be(void *rx_tlv_hdr, void *ppduinfo,
}
filter_category =
-HAL_RX_GET_FILTER_CATEGORY(rx_mpdu_start);
+HAL_RX_GET_FILTER_CATEGORY(rx_tlv);
if (filter_category == 0)
ppdu_info->rx_status.rxpcu_filter_pass = 1;
@@ -1679,42 +1679,6 @@ void hal_tx_update_tidmap_prty_generic_be(struct hal_soc *soc, uint8_t value)
(value & HWIO_TCL_R0_TID_MAP_PRTY_RMSK));
}
/**
* hal_rx_mpdu_end_offset_get_generic(): API to get the
* mpdu_end structure offset rx_pkt_tlv structure
*
* NOTE: API returns offset of attn TLV from structure
* rx_pkt_tlvs
*/
static uint32_t hal_rx_mpdu_end_offset_get_generic(void)
{
return RX_PKT_TLV_OFFSET(mpdu_end_tlv);
}
/**
* hal_rx_attn_offset_get_generic(): API to get the
* msdu_end structure offset rx_pkt_tlv structure
*
* NOTE: API returns offset of attn TLV from structure
* rx_pkt_tlvs
*/
static uint32_t hal_rx_attn_offset_get_generic(void)
{
return RX_PKT_TLV_OFFSET(attn_tlv);
}
/**
* hal_rx_msdu_start_offset_get_generic(): API to get the
* msdu_start structure offset rx_pkt_tlv structure
*
* NOTE: API returns offset of attn TLV from structure
* rx_pkt_tlvs
*/
static uint32_t hal_rx_msdu_start_offset_get_generic(void)
{
return RX_PKT_TLV_OFFSET(msdu_start_tlv);
}
/**
* hal_rx_get_tlv_size_generic_be() - Get rx packet tlv size
* @rx_pkt_tlv_size: TLV size for regular RX packets
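The *_offset_get helpers removed above report where each TLV sits inside the per-packet rx_pkt_tlvs block, which is what feeds the htt_tlv_filter.rx_*_offset fields in the ring configuration. The sketch below shows only the general offsetof-style pattern behind a macro like RX_PKT_TLV_OFFSET(); the struct layout and sizes are invented for illustration and do not match any real target.

#include <stdio.h>
#include <stddef.h>

/* placeholder TLV structs with made-up sizes */
struct rx_mpdu_start_tlv { unsigned int word[24]; };
struct rx_msdu_end_tlv   { unsigned int word[32]; };
struct rx_pkt_hdr_tlv    { unsigned char hdr[128]; };

/* hypothetical Beryllium-style layout: only msdu_end, mpdu_start, pkt header */
struct rx_pkt_tlvs {
	struct rx_msdu_end_tlv   msdu_end_tlv;
	struct rx_mpdu_start_tlv mpdu_start_tlv;
	struct rx_pkt_hdr_tlv    pkt_hdr_tlv;
};

#define RX_PKT_TLV_OFFSET(field) offsetof(struct rx_pkt_tlvs, field)

int main(void)
{
	/* values like these would back hal_rx_*_offset_get() for the ring cfg */
	printf("mpdu_start offset %zu\n", RX_PKT_TLV_OFFSET(mpdu_start_tlv));
	printf("msdu_end offset   %zu\n", RX_PKT_TLV_OFFSET(msdu_end_tlv));
	printf("pkt_hdr offset    %zu\n", RX_PKT_TLV_OFFSET(pkt_hdr_tlv));
	printf("total tlv size    %zu\n", sizeof(struct rx_pkt_tlvs));
	return 0;
}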

File diff suppressed because it is too large

View File

@@ -115,7 +115,6 @@
#include "hal_7850_tx.h" #include "hal_7850_tx.h"
#include "hal_7850_rx.h" #include "hal_7850_rx.h"
#include "hal_7850_rx_tlv.h"
#include "hal_be_rx_tlv.h" #include "hal_be_rx_tlv.h"
#include <hal_generic_api.h> #include <hal_generic_api.h>
@@ -256,6 +255,55 @@ void *hal_dst_mpdu_desc_info_7850(void *dst_ring_desc)
return (void *)HAL_DST_MPDU_DESC_INFO(dst_ring_desc);
}
/*
* hal_rx_get_tlv_7850(): API to get the tlv
*
* @rx_tlv: TLV data extracted from the rx packet
* Return: uint8_t
*/
static uint8_t hal_rx_get_tlv_7850(void *rx_tlv)
{
return HAL_RX_GET(rx_tlv, PHYRX_RSSI_LEGACY, RECEIVE_BANDWIDTH);
}
/**
* hal_rx_proc_phyrx_other_receive_info_tlv_7850()
* - process other receive info TLV
* @rx_tlv_hdr: pointer to TLV header
* @ppdu_info: pointer to ppdu_info
*
* Return: None
*/
static
void hal_rx_proc_phyrx_other_receive_info_tlv_7850(void *rx_tlv_hdr,
void *ppdu_info_handle)
{
uint32_t tlv_tag, tlv_len;
uint32_t temp_len, other_tlv_len, other_tlv_tag;
void *rx_tlv = (uint8_t *)rx_tlv_hdr + HAL_RX_TLV32_HDR_SIZE;
void *other_tlv_hdr = NULL;
void *other_tlv = NULL;
tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv_hdr);
tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv_hdr);
temp_len = 0;
other_tlv_hdr = rx_tlv + HAL_RX_TLV32_HDR_SIZE;
other_tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(other_tlv_hdr);
other_tlv_len = HAL_RX_GET_USER_TLV32_LEN(other_tlv_hdr);
temp_len += other_tlv_len;
other_tlv = other_tlv_hdr + HAL_RX_TLV32_HDR_SIZE;
switch (other_tlv_tag) {
default:
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s unhandled TLV type: %d, TLV len:%d",
__func__, other_tlv_tag, other_tlv_len);
break;
}
}
/**
* hal_reo_config_7850(): Set reo config parameters
* @soc: hal soc handle
@@ -696,11 +744,6 @@ static void hal_hw_txrx_ops_attach_wcn7850(struct hal_soc *hal_soc)
/* rx - TLV struct offsets */
hal_soc->ops->hal_rx_msdu_end_offset_get =
hal_rx_msdu_end_offset_get_generic;
// hal_soc->ops->hal_rx_attn_offset_get = hal_rx_attn_offset_get_generic;
// hal_soc->ops->hal_rx_msdu_start_offset_get =
// hal_rx_msdu_start_offset_get_generic;
// hal_soc->ops->hal_rx_mpdu_end_offset_get =
// hal_rx_mpdu_end_offset_get_generic;
hal_soc->ops->hal_rx_mpdu_start_offset_get =
hal_rx_mpdu_start_offset_get_generic;
hal_soc->ops->hal_rx_pkt_tlv_offset_get =