qcacmn: add exception frame flag for non-regular RX delivery

FISA RX aggregation is not necessary for non-regular RX delivery,
as it requires an extra FISA flush and may also impact regular
dp_rx_process() FISA RX aggregation.

Add an exception frame flag for non-regular RX delivery so that
the FISA path can identify these frames and bypass FISA RX.

Change-Id: Ic06cb72b516221754b124a673ab6c4f392947897
CRs-Fixed: 2680255
This commit is contained in:
Jinwei Chen
2020-05-13 10:37:01 +08:00
committed by nshrivas
parent d38acd0c2f
commit 800b1b181b
2 changed files with 33 additions and 35 deletions

View File

@@ -1791,7 +1791,10 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
 				 l2_hdr_offset);
 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
-		vdev->osif_rx(vdev->osif_vdev, nbuf);
+		qdf_nbuf_set_exc_frame(nbuf, 1);
+		if (QDF_STATUS_SUCCESS !=
+		    vdev->osif_rx(vdev->osif_vdev, nbuf))
+			goto deliver_fail;
 		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
 		return;
 	}
@@ -2989,6 +2992,7 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
 		qdf_nbuf_pull_head(nbuf, skip_len);
 
 	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
+		qdf_nbuf_set_exc_frame(nbuf, 1);
 		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
 				       nbuf, NULL);
 		return true;

View File

@@ -1077,44 +1077,37 @@ dp_rx_null_q_desc_handle(struct dp_soc *soc, qdf_nbuf_t nbuf,
 		qdf_nbuf_set_next(nbuf, NULL);
 		dp_rx_deliver_raw(vdev, nbuf, peer);
 	} else {
-		if (vdev->osif_rx) {
-			qdf_nbuf_set_next(nbuf, NULL);
-			DP_STATS_INC_PKT(peer, rx.to_stack, 1,
-					 qdf_nbuf_len(nbuf));
-			/*
-			 * Update the protocol tag in SKB based on
-			 * CCE metadata
-			 */
-			dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
-						  EXCEPTION_DEST_RING_ID,
-						  true, true);
-			/* Update the flow tag in SKB based on FSE metadata */
-			dp_rx_update_flow_tag(soc, vdev, nbuf,
-					      rx_tlv_hdr, true);
-			if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
-						soc->hal_soc, rx_tlv_hdr) &&
-					 (vdev->rx_decap_type ==
-					  htt_cmn_pkt_type_ethernet))) {
-				eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
-				DP_STATS_INC_PKT(peer, rx.multicast, 1,
-						 qdf_nbuf_len(nbuf));
-				if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
-					DP_STATS_INC_PKT(peer, rx.bcast, 1,
-							 qdf_nbuf_len(nbuf));
-				}
-			}
-			vdev->osif_rx(vdev->osif_vdev, nbuf);
-		} else {
-			dp_err_rl("INVALID osif_rx. vdev %pK", vdev);
-			DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
-			goto drop_nbuf;
-		}
+		qdf_nbuf_set_next(nbuf, NULL);
+		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
+				 qdf_nbuf_len(nbuf));
+		/*
+		 * Update the protocol tag in SKB based on
+		 * CCE metadata
+		 */
+		dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
+					  EXCEPTION_DEST_RING_ID,
+					  true, true);
+		/* Update the flow tag in SKB based on FSE metadata */
+		dp_rx_update_flow_tag(soc, vdev, nbuf,
+				      rx_tlv_hdr, true);
+		if (qdf_unlikely(hal_rx_msdu_end_da_is_mcbc_get(
+					soc->hal_soc, rx_tlv_hdr) &&
+				 (vdev->rx_decap_type ==
+				  htt_cmn_pkt_type_ethernet))) {
+			eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+			DP_STATS_INC_PKT(peer, rx.multicast, 1,
+					 qdf_nbuf_len(nbuf));
+			if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
+				DP_STATS_INC_PKT(peer, rx.bcast, 1,
+						 qdf_nbuf_len(nbuf));
+		}
+		qdf_nbuf_set_exc_frame(nbuf, 1);
+		dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
 	}
 	return QDF_STATUS_SUCCESS;
@@ -1283,6 +1276,7 @@ process_rx:
 	/* Update the flow tag in SKB based on FSE metadata */
 	dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
 	DP_STATS_INC(peer, rx.to_stack.num, 1);
+	qdf_nbuf_set_exc_frame(nbuf, 1);
 	dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
 }