@@ -833,6 +833,19 @@ void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
 }
 #endif
 
+#ifdef QCA_OL_TX_MULTIQ_SUPPORT
+static inline
+void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
+{
+	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
+}
+#else
+static inline
+void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
+{
+}
+#endif
+
 /*
  * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
  *
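
Note (not part of the patch): under QCA_OL_TX_MULTIQ_SUPPORT the new helper stamps the RX ring id into the nbuf's queue mapping, so that when the frame is re-injected through dp_tx_send() a multi-queue TX implementation can keep it on a TX queue corresponding to the RX ring it arrived on; in the non-multiq build the helper compiles to a no-op. A minimal sketch of how a TX-side consumer might read the mapping back, assuming the qdf_nbuf_get_queue_mapping() counterpart of the setter used above is available; dp_tx_pick_ring_for_fwd() is a hypothetical name used only for illustration.

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
/* Hypothetical TX-side consumer: recover the ring id stamped by
 * dp_rx_nbuf_queue_mapping_set() and fold it onto the available TX rings.
 * Illustration only, not part of this patch.
 */
static inline uint8_t dp_tx_pick_ring_for_fwd(qdf_nbuf_t nbuf,
					      uint8_t num_tx_rings)
{
	if (!num_tx_rings)
		return 0;

	return (uint8_t)(qdf_nbuf_get_queue_mapping(nbuf) % num_tx_rings);
}
#endif
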
@@ -850,6 +863,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 {
 	uint16_t len;
 	qdf_nbuf_t nbuf_copy;
+	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
 
 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
 					    nbuf))
@@ -870,14 +884,14 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 
 	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
 
+	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
 
+	dp_rx_nbuf_queue_mapping_set(nbuf_copy, ring_id);
 	if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
 						      tid_stats))
 		return false;
 
-	/* set TX notify flag 0 to avoid unnecessary TX comp callback */
-	qdf_nbuf_tx_notify_comp_set(nbuf_copy, 0);
 	if (dp_tx_send((struct cdp_soc_t *)soc,
 		       ta_peer->vdev->vdev_id, nbuf_copy)) {
 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
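
Note (not part of the patch): nbuf_copy is a copy of the received frame, so its control block still carries the RX-layout fields; the added qdf_mem_set() clears it before the copy is handed to dp_tx_send(), and the explicit qdf_nbuf_tx_notify_comp_set(nbuf_copy, 0) call is dropped, presumably because the full-cb clear already zeroes that state. A condensed sketch of the resulting prepare-and-forward sequence for the copy, using only calls visible in this patch plus qdf_nbuf_copy() and qdf_nbuf_free(); dp_rx_fwd_mcbc_copy() is a hypothetical wrapper name, and the classification, NAWDS and stats steps are omitted for brevity.

/* Hypothetical condensed form of dp_rx_intrabss_mcbc_fwd() after this
 * patch; illustration only.
 */
static bool dp_rx_fwd_mcbc_copy(struct dp_soc *soc,
				struct dp_txrx_peer *ta_peer,
				qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_t nbuf_copy = qdf_nbuf_copy(nbuf);

	if (!nbuf_copy)
		return false;

	/* the copy inherits the RX control block; start TX from a clean cb */
	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	/* keep the forwarded copy on a TX queue matching its RX ring */
	dp_rx_nbuf_queue_mapping_set(nbuf_copy, ring_id);

	/* dp_tx_send() hands the nbuf back if it could not be sent
	 * (treated as failure in the stats handling above)
	 */
	nbuf_copy = dp_tx_send((struct cdp_soc_t *)soc,
			       ta_peer->vdev->vdev_id, nbuf_copy);
	if (nbuf_copy) {
		qdf_nbuf_free(nbuf_copy);
		return false;
	}

	return true;
}
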
@@ -910,6 +924,7 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 			      struct cdp_tid_rx_stats *tid_stats)
 {
 	uint16_t len;
+	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
 
 	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
 
@@ -935,8 +950,10 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 		}
 	}
 
+	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);
 
+	dp_rx_nbuf_queue_mapping_set(nbuf, ring_id);
 	if (!dp_tx_send((struct cdp_soc_t *)soc,
 			tx_vdev_id, nbuf)) {
 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
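
Note (not part of the patch): the ordering in the unicast hunks matters. QDF_NBUF_CB_RX_CTX_ID() and QDF_NBUF_CB_RX_PKT_LEN() read fields out of nbuf->cb, and the added qdf_mem_set(nbuf->cb, ...) wipes that same control block before the frame goes to dp_tx_send(), so ring_id and len are captured at the top of the function, before the clear, while the queue mapping lives outside cb and can be stamped afterwards. A minimal sketch of that capture-clear-stamp sequence, using only calls that appear in this patch; dp_rx_prep_nbuf_for_intrabss_tx() is a hypothetical wrapper name for illustration.

/* Hypothetical wrapper showing the capture-clear-stamp order used by the
 * unicast intra-BSS forward path above; illustration only.
 */
static inline void dp_rx_prep_nbuf_for_intrabss_tx(qdf_nbuf_t nbuf)
{
	/* capture RX metadata while nbuf->cb still holds the RX layout */
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);

	/* clear the control block so TX starts from clean cb state */
	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));

	/* queue mapping is kept outside cb, so it survives the clear */
	dp_rx_nbuf_queue_mapping_set(nbuf, ring_id);
}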