qcacmn: fix invalid access to rx_tlv_hdr due to scattered msdu
When a long-length MSDU is received, it appears the MSDU is spread across multiple nbufs, and there is no corresponding logic for this case. qdf_set_pkt_len will invoke pskb_expand_head to renew the skb->head buffer, but rx_tlv_hdr still points to the original skb->data buffer, so an invalid access will happen. As a WAR, drop the nbufs related to this MSDU after dp_rx_sg_create is done. Change-Id: Iceb09fd04e4d768325018a8ddd4261ab4f75991a CRs-Fixed: 2597927
This commit is contained in:
@@ -1031,22 +1031,25 @@ static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
|
|||||||
* dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
|
* dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
|
||||||
* multiple nbufs.
|
* multiple nbufs.
|
||||||
* @nbuf: pointer to the first msdu of an amsdu.
|
* @nbuf: pointer to the first msdu of an amsdu.
|
||||||
* @rx_tlv_hdr: pointer to the start of RX TLV headers.
|
|
||||||
*
|
|
||||||
*
|
*
|
||||||
* This function implements the creation of RX frag_list for cases
|
* This function implements the creation of RX frag_list for cases
|
||||||
* where an MSDU is spread across multiple nbufs.
|
* where an MSDU is spread across multiple nbufs.
|
||||||
*
|
*
|
||||||
* Return: returns the head nbuf which contains complete frag_list.
|
* Return: returns the head nbuf which contains complete frag_list.
|
||||||
*/
|
*/
|
||||||
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
|
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf)
|
||||||
{
|
{
|
||||||
qdf_nbuf_t parent, frag_list, next = NULL;
|
qdf_nbuf_t parent, frag_list, next = NULL;
|
||||||
uint16_t frag_list_len = 0;
|
uint16_t frag_list_len = 0;
|
||||||
uint16_t mpdu_len;
|
uint16_t mpdu_len;
|
||||||
bool last_nbuf;
|
bool last_nbuf;
|
||||||
|
|
||||||
mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
|
/*
|
||||||
|
* Use msdu len got from REO entry descriptor instead since
|
||||||
|
* there is case the RX PKT TLV is corrupted while msdu_len
|
||||||
|
* from REO descriptor is right for non-raw RX scatter msdu.
|
||||||
|
*/
|
||||||
|
mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
|
||||||
/*
|
/*
|
||||||
* this is a case where the complete msdu fits in one single nbuf.
|
* this is a case where the complete msdu fits in one single nbuf.
|
||||||
* in this case HW sets both start and end bit and we only need to
|
* in this case HW sets both start and end bit and we only need to
|
||||||
@@ -1881,13 +1884,12 @@ more_data:
|
|||||||
if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
|
if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
|
||||||
qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
|
qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
|
||||||
|
|
||||||
if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
|
if (qdf_unlikely(msdu_desc_info.msdu_flags &
|
||||||
HAL_MPDU_F_RAW_AMPDU)) {
|
HAL_MSDU_F_MSDU_CONTINUATION)) {
|
||||||
/* previous msdu has end bit set, so current one is
|
/* previous msdu has end bit set, so current one is
|
||||||
* the new MPDU
|
* the new MPDU
|
||||||
*/
|
*/
|
||||||
if (is_prev_msdu_last) {
|
if (is_prev_msdu_last) {
|
||||||
is_prev_msdu_last = false;
|
|
||||||
/* Get number of entries available in HW ring */
|
/* Get number of entries available in HW ring */
|
||||||
num_entries_avail =
|
num_entries_avail =
|
||||||
hal_srng_dst_num_valid(hal_soc,
|
hal_srng_dst_num_valid(hal_soc,
|
||||||
@@ -1900,16 +1902,26 @@ more_data:
|
|||||||
*/
|
*/
|
||||||
if (((msdu_desc_info.msdu_len /
|
if (((msdu_desc_info.msdu_len /
|
||||||
(RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 1)) >
|
(RX_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 1)) >
|
||||||
num_entries_avail)
|
num_entries_avail) {
|
||||||
|
DP_STATS_INC(
|
||||||
|
soc,
|
||||||
|
rx.msdu_scatter_wait_break,
|
||||||
|
1);
|
||||||
break;
|
break;
|
||||||
} else {
|
}
|
||||||
if (msdu_desc_info.msdu_flags &
|
is_prev_msdu_last = false;
|
||||||
HAL_MSDU_F_LAST_MSDU_IN_MPDU)
|
|
||||||
is_prev_msdu_last = true;
|
|
||||||
}
|
}
|
||||||
qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
|
||||||
|
HAL_MPDU_F_RAW_AMPDU))
|
||||||
|
qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
|
||||||
|
|
||||||
|
if (!is_prev_msdu_last &&
|
||||||
|
msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
|
||||||
|
is_prev_msdu_last = true;
|
||||||
|
|
||||||
/* Pop out the descriptor*/
|
/* Pop out the descriptor*/
|
||||||
hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
|
hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
|
||||||
|
|
||||||
@@ -2064,7 +2076,7 @@ done:
|
|||||||
* Check if DMA completed -- msdu_done is the last bit
|
* Check if DMA completed -- msdu_done is the last bit
|
||||||
* to be written
|
* to be written
|
||||||
*/
|
*/
|
||||||
if (qdf_unlikely(!qdf_nbuf_is_raw_frame(nbuf) &&
|
if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
|
||||||
!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
|
!hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
|
||||||
dp_err("MSDU DONE failure");
|
dp_err("MSDU DONE failure");
|
||||||
DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
|
DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
|
||||||
@@ -2120,14 +2132,23 @@ done:
|
|||||||
qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
|
qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
|
||||||
|
|
||||||
qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
|
qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
|
||||||
} else if (qdf_nbuf_is_raw_frame(nbuf)) {
|
} else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
|
||||||
msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
|
msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
|
||||||
nbuf = dp_rx_sg_create(nbuf, rx_tlv_hdr);
|
nbuf = dp_rx_sg_create(nbuf);
|
||||||
|
|
||||||
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
|
|
||||||
DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
|
|
||||||
|
|
||||||
next = nbuf->next;
|
next = nbuf->next;
|
||||||
|
|
||||||
|
if (qdf_nbuf_is_raw_frame(nbuf)) {
|
||||||
|
DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
|
||||||
|
DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
|
||||||
|
} else {
|
||||||
|
qdf_nbuf_free(nbuf);
|
||||||
|
DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
|
||||||
|
dp_info_rl("scatter msdu len %d, dropped",
|
||||||
|
msdu_len);
|
||||||
|
nbuf = next;
|
||||||
|
dp_peer_unref_del_find_by_id(peer);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
l2_hdr_offset =
|
l2_hdr_offset =
|
||||||
hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
|
hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc,
|
||||||
|
@@ -497,14 +497,13 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
|
|||||||
* dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
|
* dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
|
||||||
* multiple nbufs.
|
* multiple nbufs.
|
||||||
* @nbuf: pointer to the first msdu of an amsdu.
|
* @nbuf: pointer to the first msdu of an amsdu.
|
||||||
* @rx_tlv_hdr: pointer to the start of RX TLV headers.
|
|
||||||
*
|
*
|
||||||
* This function implements the creation of RX frag_list for cases
|
* This function implements the creation of RX frag_list for cases
|
||||||
* where an MSDU is spread across multiple nbufs.
|
* where an MSDU is spread across multiple nbufs.
|
||||||
*
|
*
|
||||||
* Return: returns the head nbuf which contains complete frag_list.
|
* Return: returns the head nbuf which contains complete frag_list.
|
||||||
*/
|
*/
|
||||||
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr);
|
qdf_nbuf_t dp_rx_sg_create(qdf_nbuf_t nbuf);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* dp_rx_desc_pool_alloc() - create a pool of software rx_descs
|
* dp_rx_desc_pool_alloc() - create a pool of software rx_descs
|
||||||
|
@@ -5920,6 +5920,12 @@ dp_print_soc_rx_stats(struct dp_soc *soc)
|
|||||||
DP_PRINT_STATS("RXDMA ERR DUP DESC: %d",
|
DP_PRINT_STATS("RXDMA ERR DUP DESC: %d",
|
||||||
soc->stats.rx.err.hal_rxdma_err_dup);
|
soc->stats.rx.err.hal_rxdma_err_dup);
|
||||||
|
|
||||||
|
DP_PRINT_STATS("RX scatter msdu: %d",
|
||||||
|
soc->stats.rx.err.scatter_msdu);
|
||||||
|
|
||||||
|
DP_PRINT_STATS("RX wait completed msdu break: %d",
|
||||||
|
soc->stats.rx.msdu_scatter_wait_break);
|
||||||
|
|
||||||
for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
|
for (i = 0; i < HAL_RXDMA_ERR_MAX; i++) {
|
||||||
index += qdf_snprint(&rxdma_error[index],
|
index += qdf_snprint(&rxdma_error[index],
|
||||||
DP_RXDMA_ERR_LENGTH - index,
|
DP_RXDMA_ERR_LENGTH - index,
|
||||||
|
@@ -735,6 +735,9 @@ struct dp_soc_stats {
|
|||||||
uint32_t hp_oos2;
|
uint32_t hp_oos2;
|
||||||
/* Rx ring near full */
|
/* Rx ring near full */
|
||||||
uint32_t near_full;
|
uint32_t near_full;
|
||||||
|
/* Break ring reaping as not all scattered msdu received */
|
||||||
|
uint32_t msdu_scatter_wait_break;
|
||||||
|
|
||||||
struct {
|
struct {
|
||||||
/* Invalid RBM error count */
|
/* Invalid RBM error count */
|
||||||
uint32_t invalid_rbm;
|
uint32_t invalid_rbm;
|
||||||
@@ -775,6 +778,8 @@ struct dp_soc_stats {
|
|||||||
uint32_t hal_rxdma_err_dup;
|
uint32_t hal_rxdma_err_dup;
|
||||||
/* REO cmd send fail/requeue count */
|
/* REO cmd send fail/requeue count */
|
||||||
uint32_t reo_cmd_send_fail;
|
uint32_t reo_cmd_send_fail;
|
||||||
|
/* RX msdu drop count due to scatter */
|
||||||
|
uint32_t scatter_msdu;
|
||||||
} err;
|
} err;
|
||||||
|
|
||||||
/* packet count per core - per ring */
|
/* packet count per core - per ring */
|
||||||
|
Reference in New Issue
Block a user