qcacmn: Use HAL_RX_BUF_RBM_SW3_RBM for defrag pkts
Currently defragmented packets use HAL_RX_BUF_RBM_SW1_RBM as the RBM value for the defragmented packets which are re-injected into REO. Thus, if REO encounters any error while handling these packets, they would end up in WBM2SW1 ring (via WBM), which is managed by the FW. The FW will eventually recycle these buffers back to RXDMA via its refill process. As a part of defragmentation, host does a 802.11 -> 802.3 header conversion. This is resulting in an address which is not 4 byte aligned. Hence, when RXDMA tries to use these addresses (after FW recycles them), it may lead to issues. Change the RBM value of the defragmented buffers which are re-injected. Now, if REO ends up throwing an error for these packets, they will end up in WBM2SW3, which is managed by the host. The host can then drop these packets and replenish RXDMA with 4 byte aligned buffers (via FW). Change-Id: I9d9c25385978d5be855699feb28d292c6f3fffdd CRs-Fixed: 2572483
Este cometimento está contido em:

cometido por
nshrivas

ascendente
bcb1f1738f
cometimento
918456b6c3
@@ -237,8 +237,8 @@ static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
|
||||
struct dp_soc *psoc = peer->vdev->pdev->soc;
|
||||
struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];
|
||||
|
||||
dp_info("Adding TID %u to waitlist for peer %pK at MAC address %pM",
|
||||
tid, peer, peer->mac_addr.raw);
|
||||
dp_debug("Adding TID %u to waitlist for peer %pK at MAC address %pM",
|
||||
tid, peer, peer->mac_addr.raw);
|
||||
|
||||
/* TODO: use LIST macros instead of TAIL macros */
|
||||
qdf_spin_lock_bh(&psoc->rx.defrag.defrag_lock);
|
||||
@@ -266,11 +266,11 @@ void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
|
||||
struct dp_rx_tid *rx_reorder;
|
||||
struct dp_rx_tid *tmp;
|
||||
|
||||
dp_info("Removing TID %u to waitlist for peer %pK at MAC address %pM",
|
||||
tid, peer, peer->mac_addr.raw);
|
||||
dp_debug("Removing TID %u to waitlist for peer %pK at MAC address %pM",
|
||||
tid, peer, peer->mac_addr.raw);
|
||||
|
||||
if (tid >= DP_MAX_TIDS) {
|
||||
dp_info("TID out of bounds: %d", tid);
|
||||
dp_err("TID out of bounds: %d", tid);
|
||||
qdf_assert_always(0);
|
||||
}
|
||||
|
||||
@@ -1109,7 +1109,7 @@ static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_WBM2SW_RBM);
|
||||
hal_rxdma_buff_addr_info_set(msdu0, paddr, cookie, DP_DEFRAG_RBM);
|
||||
|
||||
/* Lets fill entrance ring now !!! */
|
||||
if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
|
||||
@@ -1141,12 +1141,10 @@ static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
|
||||
HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
|
||||
MSDU_COUNT, 0x1);
|
||||
HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
|
||||
MPDU_SEQUENCE_NUMBER, seq_no);
|
||||
|
||||
MPDU_SEQUENCE_NUMBER, seq_no);
|
||||
/* unset frag bit */
|
||||
HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
|
||||
FRAGMENT_FLAG, 0x0);
|
||||
|
||||
/* set sa/da valid bits */
|
||||
HAL_RX_MPDU_DESC_INFO_SET(ent_mpdu_desc_info,
|
||||
SA_IS_VALID, 0x1);
|
||||
@@ -1696,9 +1694,8 @@ uint32_t dp_rx_frag_handle(struct dp_soc *soc, hal_ring_desc_t ring_desc,
|
||||
qdf_assert(mpdu_desc_info);
|
||||
qdf_assert(rx_desc);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"Number of MSDUs to process, num_msdus: %d",
|
||||
mpdu_desc_info->msdu_count);
|
||||
dp_debug("Number of MSDUs to process, num_msdus: %d",
|
||||
mpdu_desc_info->msdu_count);
|
||||
|
||||
|
||||
if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
|
||||
|
Criar uma nova questão referindo esta
Bloquear um utilizador