@@ -1626,6 +1626,133 @@ fail:
         return;
 }
 
+#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
+/**
+ * dp_rx_err_route_hdl() - Deliver EAPOL frames routed to the WBM error
+ *                         ring to the stack; free any other packet that
+ *                         arrives on this path.
+ *
+ * @soc: core DP main context
+ * @nbuf: buffer pointer
+ * @peer: peer handle
+ * @rx_tlv_hdr: start of rx tlv header
+ * @err_src: source of the routed frame (REO or RXDMA)
+ *
+ * This function indicates an EAPOL frame received in the wbm error ring
+ * to the stack. Any other frame is dropped.
+ *
+ * Return: None
+ */
+static void
+dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
+                    struct dp_peer *peer, uint8_t *rx_tlv_hdr,
+                    enum hal_rx_wbm_error_source err_src)
+{
+        uint32_t pkt_len;
+        uint16_t msdu_len;
+        struct dp_vdev *vdev;
+        struct hal_rx_msdu_metadata msdu_metadata;
+
+        hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
+        msdu_len = hal_rx_msdu_start_msdu_len_get(soc->hal_soc, rx_tlv_hdr);
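+        /* Total length = MSDU length + L3 header pad + RX TLV header */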
+        pkt_len = msdu_len + msdu_metadata.l3_hdr_pad + soc->rx_pkt_tlv_size;
+
+        if (qdf_likely(!qdf_nbuf_is_frag(nbuf))) {
+                if (dp_rx_check_pkt_len(soc, pkt_len))
+                        goto drop_nbuf;
+
+                /* Set length in nbuf */
+                qdf_nbuf_set_pktlen(
+                        nbuf, qdf_min(pkt_len, (uint32_t)RX_DATA_BUFFER_SIZE));
+                qdf_assert_always(nbuf->data == rx_tlv_hdr);
+        }
+
+        /*
+         * Check if DMA completed -- msdu_done is the last bit
+         * to be written
+         */
+        if (!hal_rx_attn_msdu_done_get(soc->hal_soc, rx_tlv_hdr)) {
+                dp_err_rl("MSDU DONE failure");
+                hal_rx_dump_pkt_tlvs(soc->hal_soc, rx_tlv_hdr,
+                                     QDF_TRACE_LEVEL_INFO);
+                qdf_assert(0);
+        }
+
+        if (!peer)
+                goto drop_nbuf;
+
+        vdev = peer->vdev;
+        if (!vdev) {
+                dp_err_rl("Null vdev!");
+                DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
+                goto drop_nbuf;
+        }
+
+        /*
+         * Advance the packet start pointer by total size of
+         * pre-header TLVs
+         */
+        if (qdf_nbuf_is_frag(nbuf))
+                qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
+        else
+                qdf_nbuf_pull_head(nbuf, (msdu_metadata.l3_hdr_pad +
+                                          soc->rx_pkt_tlv_size));
+
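+        /* Account this frame in the per-vdev protocol packet counters */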
+        dp_vdev_peer_stats_update_protocol_cnt(vdev, nbuf, NULL, 0, 1);
+
+        /*
+         * Indicate EAPOL frame to stack only when vap mac address
+         * matches the destination address.
+         */
+        if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
+            qdf_nbuf_is_ipv4_wapi_pkt(nbuf)) {
+                qdf_ether_header_t *eh =
+                        (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+                if (qdf_mem_cmp(eh->ether_dhost, &vdev->mac_addr.raw[0],
+                                QDF_MAC_ADDR_SIZE) == 0) {
+                        /*
+                         * Update the protocol tag in SKB based on
+                         * CCE metadata.
+                         */
+                        dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
+                                                  EXCEPTION_DEST_RING_ID,
+                                                  true, true);
+                        /* Update the flow tag in SKB based on FSE metadata */
+                        dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr,
+                                              true);
+                        DP_STATS_INC(peer, rx.to_stack.num, 1);
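+                        /* Mark as exception frame; unchain before delivery */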
+                        qdf_nbuf_set_exc_frame(nbuf, 1);
+                        qdf_nbuf_set_next(nbuf, NULL);
+                        dp_rx_deliver_to_stack(soc, vdev, peer, nbuf, NULL);
+                        return;
+                }
+        }
+
+drop_nbuf:
+
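+        /* Count the drop against the ring that routed the frame here */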
+        DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
+                      err_src == HAL_RX_WBM_ERR_SRC_REO);
+        DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
+                      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
+
+        qdf_nbuf_free(nbuf);
+}
+#else
+
+static void
+dp_rx_err_route_hdl(struct dp_soc *soc, qdf_nbuf_t nbuf,
+                    struct dp_peer *peer, uint8_t *rx_tlv_hdr,
+                    enum hal_rx_wbm_error_source err_src)
+{
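+        /* Tagging support compiled out: only count and drop routed frames */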
+        DP_STATS_INCC(soc, rx.reo2rel_route_drop, 1,
+                      err_src == HAL_RX_WBM_ERR_SRC_REO);
+        DP_STATS_INCC(soc, rx.rxdma2rel_route_drop, 1,
+                      err_src == HAL_RX_WBM_ERR_SRC_RXDMA);
+
+        qdf_nbuf_free(nbuf);
+}
+#endif
+
 #ifndef QCA_HOST_MODE_WIFI_DISABLED
 
 #ifdef DP_RX_DESC_COOKIE_INVALIDATE
@@ -2477,10 +2604,10 @@ done:
                                         qdf_nbuf_free(nbuf);
                                 }
                         } else if (wbm_err_info.reo_psh_rsn
-                                   == HAL_RX_WBM_REO_PSH_RSN_ROUTE) {
-                                DP_STATS_INC(soc, rx.reo2rel_route_drop, 1);
-                                qdf_nbuf_free(nbuf);
-                        }
+                                   == HAL_RX_WBM_REO_PSH_RSN_ROUTE)
+                                dp_rx_err_route_hdl(soc, nbuf, peer,
+                                                    rx_tlv_hdr,
+                                                    HAL_RX_WBM_ERR_SRC_REO);
                 } else if (wbm_err_info.wbm_err_src ==
                            HAL_RX_WBM_ERR_SRC_RXDMA) {
                         if (wbm_err_info.rxdma_psh_rsn
@@ -2545,10 +2672,10 @@ done:
                                                 wbm_err_info.rxdma_err_code);
                                 }
                         } else if (wbm_err_info.rxdma_psh_rsn
-                                   == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE) {
-                                DP_STATS_INC(soc, rx.rxdma2rel_route_drop, 1);
-                                qdf_nbuf_free(nbuf);
-                        }
+                                   == HAL_RX_WBM_RXDMA_PSH_RSN_ROUTE)
+                                dp_rx_err_route_hdl(soc, nbuf, peer,
+                                                    rx_tlv_hdr,
+                                                    HAL_RX_WBM_ERR_SRC_RXDMA);
                 } else {
                         /* Should not come here */
                         qdf_assert(0);