qcacmn: Changes for Rx fast flag

Change to avoid processing of extended features
based on the Fast RX flag

Also add changes to enable or disable this flag
based on feature settings

Change-Id: I8064780a271f8bdcf396bb9e2e2c14998f195535
CRs-Fixed: 3268842
Šī revīzija ir iekļauta:
Chaithanya Garrepalli
2022-08-29 15:58:20 +05:30
revīziju iesūtīja Madan Koyyalamudi
vecāks f60c8e57d3
revīzija daed8f2cc5
5 mainīti faili ar 159 papildinājumiem un 61 dzēšanām

Parādīt failu

@@ -735,21 +735,6 @@ done:
dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);
/*
 * process frame for multipass phase processing
 */
if (qdf_unlikely(vdev->multipass_en)) {
if (dp_rx_multipass_process(txrx_peer, nbuf,
tid) == false) {
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.multipass_rx_pkt_drop,
1);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
}
}
if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, txrx_peer)) {
dp_rx_err("%pK: Policy Check Drop pkt", soc);
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
@@ -762,18 +747,6 @@ done:
continue;
}
if (qdf_unlikely(txrx_peer && (txrx_peer->nawds_enabled) &&
(qdf_nbuf_is_da_mcbc(nbuf)) &&
(hal_rx_get_mpdu_mac_ad4_valid_be(rx_tlv_hdr)
== false))) {
tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.nawds_mcast_drop, 1);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
}
/*
* Drop non-EAPOL frames from unauthorized peer.
*/
@@ -793,34 +766,45 @@ done:
}
}
if (soc->process_rx_status)
dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
/* Update the protocol tag in SKB based on CCE metadata */
dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
reo_ring_num, false, true);
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
reo_ring_num, tid_stats);
if (qdf_unlikely(vdev->mesh_vdev)) {
if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
== QDF_STATUS_SUCCESS) {
dp_rx_info("%pK: mesh pkt filtered", soc);
tid_stats->fail_cnt[MESH_FILTER_DROP]++;
DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
1);
if (qdf_unlikely(!rx_pdev->rx_fast_flag)) {
/*
 * process frame for multipass phase processing
 */
if (qdf_unlikely(vdev->multipass_en)) {
if (dp_rx_multipass_process(txrx_peer, nbuf,
tid) == false) {
DP_PEER_PER_PKT_STATS_INC
(txrx_peer,
rx.multipass_rx_pkt_drop, 1);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
}
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
txrx_peer);
}
if (qdf_unlikely(txrx_peer &&
(txrx_peer->nawds_enabled) &&
(qdf_nbuf_is_da_mcbc(nbuf)) &&
(hal_rx_get_mpdu_mac_ad4_valid_be
(rx_tlv_hdr) == false))) {
tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
DP_PEER_PER_PKT_STATS_INC(txrx_peer,
rx.nawds_mcast_drop,
1);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
}
/* Update the protocol tag in SKB based on CCE metadata
*/
dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
reo_ring_num, false, true);
/* Update the flow tag in SKB based on FSE metadata */
dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
true);
if (qdf_likely(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet) &&
@@ -830,7 +814,33 @@ done:
txrx_peer,
nbuf,
msdu_metadata);
}
if (qdf_unlikely(vdev->mesh_vdev)) {
if (dp_rx_filter_mesh_packets(vdev, nbuf,
rx_tlv_hdr)
== QDF_STATUS_SUCCESS) {
dp_rx_info("%pK: mesh pkt filtered",
soc);
tid_stats->fail_cnt[MESH_FILTER_DROP]++;
DP_STATS_INC(vdev->pdev,
dropped.mesh_filter, 1);
dp_rx_nbuf_free(nbuf);
nbuf = next;
continue;
}
dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr,
txrx_peer);
}
}
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
reo_ring_num, tid_stats);
if (qdf_likely(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet) &&
qdf_likely(!vdev->mesh_vdev)) {
/* Intrabss-fwd */
if (dp_rx_check_ap_bridge(vdev))
if (dp_rx_intrabss_fwd_be(soc, txrx_peer,

Parādīt failu

@@ -4195,4 +4195,13 @@ void dp_rx_err_send_pktlog(struct dp_soc *soc, struct dp_pdev *pdev,
{
}
#endif
/**
 * dp_pdev_update_fast_rx_flag() - Update Fast rx flag for a PDEV
 * @soc: Data path soc handle
 * @pdev: PDEV handle
 *
 * Return: None
 */
void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev);
#endif /* #ifndef _DP_INTERNAL_H_ */

Parādīt failu

@@ -2142,6 +2142,67 @@ static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
#endif /* DP_MEM_PRE_ALLOC */
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_vdev_is_wds_ext_enabled() - Check WDS extended mode on a vdev
 * @vdev: DP vdev handle
 *
 * Return: true if WDS extended is enabled on this vdev, false otherwise
 */
static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return !!vdev->wds_ext_enabled;
}
#else
/* WDS extended feature compiled out: never enabled */
static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return false;
}
#endif
void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
{
struct dp_vdev *vdev = NULL;
uint8_t rx_fast_flag = true;
if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
rx_fast_flag = false;
goto update_flag;
}
/* Check if protocol tagging enable */
if (pdev->is_rx_protocol_tagging_enabled) {
rx_fast_flag = false;
goto update_flag;
}
qdf_spin_lock_bh(&pdev->vdev_list_lock);
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
/* Check if any VDEV has NAWDS enabled */
if (vdev->nawds_enabled) {
rx_fast_flag = false;
break;
}
/* Check if any VDEV has multipass enabled */
if (vdev->multipass_en) {
rx_fast_flag = false;
break;
}
/* Check if any VDEV has mesh enabled */
if (vdev->mesh_vdev) {
rx_fast_flag = false;
break;
}
/* Check if any VDEV has WDS ext enabled */
if (dp_vdev_is_wds_ext_enabled(vdev)) {
rx_fast_flag = false;
break;
}
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
update_flag:
dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
pdev->rx_fast_flag = rx_fast_flag;
}
/*
* dp_srng_free() - Free SRNG memory
* @soc : Data path soc handle
@@ -7066,6 +7127,9 @@ static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
if (wlan_op_mode_sta == vdev->opmode)
dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
dp_pdev_update_fast_rx_flag(soc, pdev);
return QDF_STATUS_SUCCESS;
fail0:
@@ -10788,6 +10852,9 @@ dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
/* Update PDEV flags as VDEV flags are updated */
dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
return QDF_STATUS_SUCCESS;
@@ -16694,6 +16761,7 @@ static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
dp_init_tso_stats(pdev);
pdev->rx_fast_flag = false;
dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
qdf_dma_mem_stats_read(),
qdf_heap_mem_stats_read(),

Parādīt failu

@@ -2149,6 +2149,7 @@ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
*
* Return: void
*/
#if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
qdf_nbuf_t nbuf,
@@ -2171,7 +2172,14 @@ void dp_rx_cksum_offload(struct dp_pdev *pdev,
DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
}
}
#else
/*
 * No-op variant used when MAX_PDEV_CNT is not 1 (or is undefined):
 * the per-pdev checksum-error stats update above is skipped in that
 * configuration.
 */
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr)
{
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT

Parādīt failu

@@ -2876,19 +2876,19 @@ struct dp_pdev {
*/
/* PDEV Id */
int pdev_id;
uint8_t pdev_id;
/* LMAC Id */
int lmac_id;
uint8_t lmac_id;
/* Target pdev Id */
int target_pdev_id;
uint8_t target_pdev_id;
bool pdev_deinit;
/* TXRX SOC handle */
struct dp_soc *soc;
bool pdev_deinit;
/* pdev status down or up required to handle dynamic hw
* mode switch between DBS and DBS_SBS.
* 1 = down
@@ -2899,6 +2899,9 @@ struct dp_pdev {
/* Enhanced Stats is enabled */
bool enhanced_stats_en;
/* Flag to indicate fast RX */
bool rx_fast_flag;
/* Second ring used to replenish rx buffers */
struct dp_srng rx_refill_buf_ring2;
#ifdef IPA_WDI3_VLAN_SUPPORT