qcacmn: Handle tx_sniffer packets

Send tx_sniffer to firmware and handle ppdu completions accordingly.

Change-Id: Id19a8cba4fdcad88bca6ade8c30a587c2da26d19
This commit is contained in:
Varsha Mishra
2019-05-28 11:54:46 +05:30
committed by nshrivas
parent 92e3f61248
commit 27c5bd3193
6 changed files with 107 additions and 15 deletions

View File

@@ -95,6 +95,8 @@
#define CDP_MU_MAX_USER_INDEX (CDP_MU_MAX_USERS - 1)
#define CDP_INVALID_PEER 0xffff
#define CDP_INVALID_TID 31
#define CDP_INVALID_TX_ENCAP_TYPE 6
#define CDP_INVALID_SEC_TYPE 12
#define CDP_DATA_TID_MAX 8
#define CDP_DATA_NON_QOS_TID 16
@@ -406,6 +408,8 @@ enum cdp_sec_type {
* @tid: Transmit Identifier
* @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet)
* @sec_type: sec_type to be passed to HAL
* @is_tx_sniffer: Indicates if the packet has to be sniffed
* @ppdu_cookie: 16-bit ppdu cookie that has to be replayed back in completions
*
* This structure holds the parameters needed in the exception path of tx
*
@@ -415,6 +419,8 @@ struct cdp_tx_exception_metadata {
uint8_t tid;
uint16_t tx_encap_type;
enum cdp_sec_type sec_type;
uint8_t is_tx_sniffer;
uint16_t ppdu_cookie;
};
typedef struct cdp_soc_t *ol_txrx_soc_handle;
@@ -1157,6 +1163,8 @@ struct cdp_tx_sojourn_stats {
* @mu_group_id: mu group id
* @rix: rate index
* @cookie: cookie to used by upper layer
* @is_ppdu_cookie_valid: Indicates that ppdu_cookie is valid
* @ppdu_cookie: 16-bit ppdu_cookie
*/
struct cdp_tx_completion_ppdu_user {
uint32_t completion_status:8,
@@ -1213,6 +1221,8 @@ struct cdp_tx_completion_ppdu_user {
uint32_t mu_group_id;
uint32_t rix;
struct cdp_stats_cookie *cookie;
uint8_t is_ppdu_cookie_valid;
uint16_t ppdu_cookie;
};
/**
@@ -1249,6 +1259,7 @@ struct cdp_tx_completion_ppdu_user {
* @ppdu_end_timestamp: TSF at PPDU end
* @ba_start_seq: Block Ack sequence number
* @ba_bitmap: Block Ack bitmap
* @ppdu_cookie: 16-bit ppdu_cookie
*/
struct cdp_tx_indication_mpdu_info {
uint32_t ppdu_id;
@@ -1278,6 +1289,7 @@ struct cdp_tx_indication_mpdu_info {
uint32_t ppdu_end_timestamp;
uint32_t ba_start_seq;
uint32_t ba_bitmap[CDP_BA_256_BIT_MAP_SIZE_DWORDS];
uint16_t ppdu_cookie;
};
/**

View File

@@ -430,6 +430,7 @@ struct cdp_pkt_type {
* @ru_start: RU start index
* @ru_tones: RU tones size
* @ru_loc: RU location 26/ 52/ 106/ 242/ 484 counter
* @num_ppdu_cookie_valid: Number of completions received with valid ppdu cookie
*/
struct cdp_tx_stats {
struct cdp_pkt_info comp_pkt;
@@ -515,6 +516,7 @@ struct cdp_tx_stats {
uint32_t ru_start;
uint32_t ru_tones;
uint32_t ru_loc[MAX_RU_LOCATIONS];
uint32_t num_ppdu_cookie_valid;
};
/* struct cdp_rx_stats - rx Level Stats
@@ -662,6 +664,7 @@ struct cdp_rx_stats {
* @completion_fw: packets completions received from fw
* @cce_classified:Number of packets classified by CCE
* @cce_classified_raw:Number of raw packets classified by CCE
* @sniffer_rcvd: Number of packets received with ppdu cookie
*/
struct cdp_tx_ingress_stats {
struct cdp_pkt_info rcvd;
@@ -725,6 +728,7 @@ struct cdp_tx_ingress_stats {
uint32_t cce_classified;
uint32_t cce_classified_raw;
struct cdp_pkt_info sniffer_rcvd;
};
/* struct cdp_vdev_stats - vdev stats structure

View File

@@ -155,6 +155,9 @@ dp_tx_stats_update(struct dp_soc *soc, struct dp_peer *peer,
return;
}
if (ppdu->is_ppdu_cookie_valid)
DP_STATS_INC(peer, tx.num_ppdu_cookie_valid, 1);
if (ppdu->mu_group_id <= MAX_MU_GROUP_ID &&
ppdu->ppdu_type != HTT_PPDU_STATS_PPDU_TYPE_SU) {
if (unlikely(!(ppdu->mu_group_id & (MAX_MU_GROUP_ID - 1))))
@@ -1937,6 +1940,14 @@ static void dp_process_ppdu_stats_user_common_tlv(
ppdu_user_desc->mpdu_tried_mcast = 0;
ppdu_user_desc->mpdu_tried_ucast = 0;
}
tag_buf += 3;
if (HTT_PPDU_STATS_IS_OPAQUE_VALID_GET(*tag_buf)) {
ppdu_user_desc->ppdu_cookie =
HTT_PPDU_STATS_HOST_OPAQUE_COOKIE_GET(*tag_buf);
ppdu_user_desc->is_ppdu_cookie_valid = 1;
}
}

View File

@@ -376,6 +376,8 @@ static inline void dp_update_pdev_stats(struct dp_pdev *tgtobj,
srcobj->tx.nawds_mcast.bytes;
tgtobj->stats.tx.nawds_mcast_drop +=
srcobj->tx.nawds_mcast_drop;
tgtobj->stats.tx.num_ppdu_cookie_valid +=
srcobj->tx.num_ppdu_cookie_valid;
tgtobj->stats.tx.tx_failed += srcobj->tx.tx_failed;
tgtobj->stats.tx.ofdma += srcobj->tx.ofdma;
tgtobj->stats.tx.stbc += srcobj->tx.stbc;
@@ -482,6 +484,7 @@ static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified);
DP_STATS_AGGR(tgtobj, srcobj, tx_i.cce_classified_raw);
DP_STATS_AGGR_PKT(tgtobj, srcobj, tx_i.sniffer_rcvd);
DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.exception_fw);
DP_STATS_AGGR(tgtobj, srcobj, tx_i.mesh.completion_fw);
@@ -562,6 +565,8 @@ static inline void dp_update_vdev_stats(struct cdp_vdev_stats *tgtobj,
srcobj->stats.tx.nawds_mcast.bytes;
tgtobj->tx.nawds_mcast_drop +=
srcobj->stats.tx.nawds_mcast_drop;
tgtobj->tx.num_ppdu_cookie_valid +=
srcobj->stats.tx.num_ppdu_cookie_valid;
tgtobj->tx.tx_failed += srcobj->stats.tx.tx_failed;
tgtobj->tx.ofdma += srcobj->stats.tx.ofdma;
tgtobj->tx.stbc += srcobj->stats.tx.stbc;

View File

@@ -74,12 +74,13 @@ static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
* @queue: queue ids container for nbuf
*
* TX packet queue has 2 instances, software descriptors id and dma ring id
* Based on tx feature and hardware configuration queue id combination could be
* different.
* Based on tx feature and hardware configuration queue id combination
* could be different.
* For example -
* With XPS enabled,all TX descriptor pools and dma ring are assigned per cpu id
* With no XPS,lock based resource protection, Descriptor pool ids are different
* for each vdev, dma ring id will be same as single pdev id
* With XPS enabled,all TX descriptor pools and dma ring are assigned
* per cpu id
* With no XPS,lock based resource protection, Descriptor pool ids are
* different for each vdev, dma ring id will be same as single pdev id
*
* Return: None
*/
@@ -256,6 +257,7 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
* dp_tx_htt_metadata_prepare() - Prepare HTT metadata for special frames
* @vdev: DP vdev Handle
* @nbuf: skb
* @msdu_info: msdu_info required to create HTT metadata
*
* Prepares and fills HTT metadata in the frame pre-header for special frames
* that should be transmitted using varying transmit parameters.
@@ -267,8 +269,9 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
*
*/
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint32_t *meta_data)
struct dp_tx_msdu_info_s *msdu_info)
{
uint32_t *meta_data = msdu_info->meta_data;
struct htt_tx_msdu_desc_ext2_t *desc_ext =
(struct htt_tx_msdu_desc_ext2_t *) meta_data;
@@ -285,7 +288,7 @@ static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
if (vdev->mesh_vdev) {
if (vdev->mesh_vdev || msdu_info->is_tx_sniffer) {
if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
htt_desc_size_aligned)) {
DP_STATS_INC(vdev,
@@ -732,8 +735,9 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
/* Initialize the SW tx descriptor */
tx_desc->nbuf = nbuf;
tx_desc->frm_type = dp_tx_frm_std;
tx_desc->tx_encap_type = (tx_exc_metadata ?
tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
tx_desc->tx_encap_type = ((tx_exc_metadata &&
(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
tx_desc->vdev = vdev;
tx_desc->pdev = pdev;
tx_desc->msdu_ext_desc = NULL;
@@ -773,7 +777,9 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
* |-----------------------------|
*/
if (qdf_unlikely((msdu_info->exception_fw)) ||
(vdev->opmode == wlan_op_mode_ocb)) {
(vdev->opmode == wlan_op_mode_ocb) ||
(tx_exc_metadata &&
tx_exc_metadata->is_tx_sniffer)) {
align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
@@ -789,7 +795,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
}
htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
msdu_info->meta_data);
msdu_info);
if (htt_hdr_size == 0)
goto failure;
tx_desc->pkt_offset = align_pad + htt_hdr_size;
@@ -1009,7 +1015,8 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_dma_addr_t dma_addr;
uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
enum cdp_sec_type sec_type = (tx_exc_metadata ?
enum cdp_sec_type sec_type = ((tx_exc_metadata &&
tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
tx_exc_metadata->sec_type : vdev->sec_type);
/* Return Buffer Manager ID */
@@ -1758,6 +1765,36 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
return nbuf;
}
/**
 * dp_tx_add_tx_sniffer_meta_data() - Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Fills the HTT MSDU extension descriptor carried in msdu_info->meta_data:
 * marks the frame "send as standalone" and embeds a host-opaque cookie that
 * the firmware replays in the PPDU completion. Also flags the msdu_info so
 * the frame takes the exception-to-FW path as a tx_sniffer frame.
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	/* Start from a clean descriptor; only the fields set below apply */
	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
		(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
		(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
		(msdu_info->meta_data[6], ppdu_cookie);

	/* tx_sniffer frames must go via FW so completions carry the cookie */
	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}
#ifdef MESH_MODE_SUPPORT
/**
@@ -1862,9 +1899,17 @@ qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
*/
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
tx_exc->sec_type > cdp_num_sec_types) {
bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
HTT_INVALID_TID);
bool invalid_encap_type = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
HTT_INVALID_TID);
bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
tx_exc->ppdu_cookie == 0);
if (invalid_tid || invalid_encap_type || invalid_sec_type ||
invalid_cookie) {
return false;
}
@@ -1892,6 +1937,9 @@ qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
qdf_mem_zero(&msdu_info, sizeof(msdu_info));
if (!tx_exc_metadata)
goto fail;
msdu_info.tid = tx_exc_metadata->tid;
eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
@@ -1940,6 +1988,14 @@ qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
}
}
if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
qdf_nbuf_len(nbuf));
dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
tx_exc_metadata->ppdu_cookie);
}
/*
* Get HW Queue to use for this frame.
* TCL supports upto 4 DMA rings, out of which 3 rings are

View File

@@ -117,6 +117,8 @@ struct dp_tx_queue {
* @u.sg_info: Scatter Gather information for non-TSO SG frames
* @meta_data: Mesh meta header information
* @exception_fw: Duplicate frame to be sent to firmware
* @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
* @is_tx_sniffer: Indicates if the packet has to be sniffed
*
* This structure holds the complete MSDU information needed to program the
* Hardware TCL and MSDU extension descriptors for different frame types
@@ -133,6 +135,8 @@ struct dp_tx_msdu_info_s {
} u;
uint32_t meta_data[7];
uint8_t exception_fw;
uint16_t ppdu_cookie;
uint8_t is_tx_sniffer;
};
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);