qcacmn: Monitor mode configuration and processing

This change adds support for a monitor mode VAP.  All the monitor mode
rings are configured. The related monitor mode rings include:
-monitor mode buffer ring
-monitor mode destination ring
-monitor mode status ring
-monitor mode link descriptor ring
Packets are not sent to the monitor mode rings unless a monitor mode VAP
is configured. This release supports a multiple-VAP configuration - AP/STA
VAPs plus a monitor VAP. The status ring is not used in this release;
however, the ring has been tested: it advances and contains TLVs.

Change-Id: I782ee0c3b998d8b3bbac79b5e7fdecdbff15fa93
CRs-Fixed: 2013049
This commit is contained in:
Kai Chen
2017-01-12 10:17:53 -08:00
committed by Sandeep Puligilla
parent f151c38600
commit 6eca1a62da
21 changed files with 3164 additions and 137 deletions

View File

@@ -0,0 +1,199 @@
/*
* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/**
* @file cdp_txrx_mon_struct.h
* @brief Define the monitor mode API structure
* shared by data path and the OS interface module
*/
#ifndef _CDP_TXRX_MON_STRUCT_H_
#define _CDP_TXRX_MON_STRUCT_H_
/* XXX not really a mode; there are really multiple PHY's */
enum cdp_mon_phymode {
	CDP_IEEE80211_MODE_AUTO = 0,            /* autoselect */
	CDP_IEEE80211_MODE_11A = 1,             /* 5 GHz, OFDM */
	CDP_IEEE80211_MODE_11B = 2,             /* 2 GHz, CCK */
	CDP_IEEE80211_MODE_11G = 3,             /* 2 GHz, OFDM */
	CDP_IEEE80211_MODE_FH = 4,              /* 2 GHz, GFSK */
	CDP_IEEE80211_MODE_TURBO_A = 5,         /* 5 GHz, OFDM, 2x clock dynamic turbo */
	CDP_IEEE80211_MODE_TURBO_G = 6,         /* 2 GHz, OFDM, 2x clock dynamic turbo */
	CDP_IEEE80211_MODE_11NA_HT20 = 7,       /* 5 GHz, HT20 */
	CDP_IEEE80211_MODE_11NG_HT20 = 8,       /* 2 GHz, HT20 */
	CDP_IEEE80211_MODE_11NA_HT40PLUS = 9,   /* 5 GHz, HT40 (ext ch +1) */
	CDP_IEEE80211_MODE_11NA_HT40MINUS = 10, /* 5 GHz, HT40 (ext ch -1) */
	CDP_IEEE80211_MODE_11NG_HT40PLUS = 11,  /* 2 GHz, HT40 (ext ch +1) */
	CDP_IEEE80211_MODE_11NG_HT40MINUS = 12, /* 2 GHz, HT40 (ext ch -1) */
	CDP_IEEE80211_MODE_11NG_HT40 = 13,      /* 2 GHz, Auto HT40 */
	CDP_IEEE80211_MODE_11NA_HT40 = 14,      /* 5 GHz, Auto HT40 */
	CDP_IEEE80211_MODE_11AC_VHT20 = 15,     /* 5 GHz, VHT20 */
	CDP_IEEE80211_MODE_11AC_VHT40PLUS = 16, /* 5 GHz, VHT40 (ext ch +1) */
	CDP_IEEE80211_MODE_11AC_VHT40MINUS = 17,/* 5 GHz, VHT40 (ext ch -1) */
	CDP_IEEE80211_MODE_11AC_VHT40 = 18,     /* 5 GHz, VHT40 */
	CDP_IEEE80211_MODE_11AC_VHT80 = 19,     /* 5 GHz, VHT80 */
	CDP_IEEE80211_MODE_11AC_VHT160 = 20,    /* 5 GHz, VHT160 */
	CDP_IEEE80211_MODE_11AC_VHT80_80 = 21,  /* 5 GHz, VHT80+80 */
};
/* Preamble/packet type of a received PPDU */
enum {
	CDP_PKT_TYPE_OFDM = 0,
	CDP_PKT_TYPE_CCK = 1,
	CDP_PKT_TYPE_HT = 2,
	CDP_PKT_TYPE_VHT = 3,
	CDP_PKT_TYPE_HE = 4,
};
/* Guard interval of a received PPDU (values in microseconds) */
enum {
	CDP_SGI_0_8_US = 0, /* 0.8 us */
	CDP_SGI_0_4_US = 1, /* 0.4 us (short GI) */
	CDP_SGI_1_6_US = 2, /* 1.6 us */
	CDP_SGI_3_2_US = 3, /* 3.2 us */
};
/* Reception type of a received PPDU */
enum {
	CDP_RX_TYPE_SU = 0,            /* single user */
	CDP_RX_TYPE_MU_MIMO = 1,       /* multi-user MIMO */
	CDP_RX_TYPE_MU_OFDMA = 2,      /* multi-user OFDMA */
	CDP_RX_TYPE_MU_OFDMA_MIMO = 3, /* multi-user OFDMA + MIMO */
};
/* Full receive bandwidth of a received PPDU */
enum {
	CDP_FULL_RX_BW_20 = 0,  /* 20 MHz */
	CDP_FULL_RX_BW_40 = 1,  /* 40 MHz */
	CDP_FULL_RX_BW_80 = 2,  /* 80 MHz */
	CDP_FULL_RX_BW_160 = 3, /* 160 MHz */
};
/* Per-frame monitor mode Rx status shared between the data path and the
 * OS interface module.
 */
struct cdp_mon_status {
/* number of Rx chains with valid RSSI */
int rs_numchains;
int rs_flags;
#define IEEE80211_RX_FCS_ERROR 0x01
#define IEEE80211_RX_MIC_ERROR 0x02
#define IEEE80211_RX_DECRYPT_ERROR 0x04
/* holes in flags here between, ATH_RX_XXXX to IEEE80211_RX_XXX */
#define IEEE80211_RX_KEYMISS 0x200
int rs_rssi; /* RSSI (noise floor adjusted) */
int rs_abs_rssi; /* absolute RSSI */
int rs_datarate; /* data rate received */
int rs_rateieee;
int rs_ratephy1;
int rs_ratephy2;
int rs_ratephy3;
/* Keep the same as ATH_MAX_ANTENNA */
#define IEEE80211_MAX_ANTENNA 3
/* per-chain RSSI, noise floor adjusted (presumably primary/control
 * channel - TODO confirm against the ath driver convention)
 */
u_int8_t rs_rssictl[IEEE80211_MAX_ANTENNA];
/* per-chain RSSI, noise floor adjusted (presumably extension channel
 * - TODO confirm)
 */
u_int8_t rs_rssiextn[IEEE80211_MAX_ANTENNA];
/* rs_rssi is valid or not */
u_int8_t rs_isvalidrssi;
enum cdp_mon_phymode rs_phymode;
int rs_freq;
/* Rx timestamp; raw byte view and 64-bit TSF view of the same field */
union {
u_int8_t data[8];
u_int64_t tsf;
} rs_tstamp;
/*
 * Detail channel structure of recv frame.
 * It could be NULL if not available
 */
#ifdef ATH_SUPPORT_AOW
u_int16_t rs_rxseq; /* WLAN Sequence number */
#endif
#ifdef ATH_VOW_EXT_STATS
/* Lower 16 bits holds the udp checksum offset in the data pkt */
u_int32_t vow_extstats_offset;
/* Higher 16 bits contains offset in the data pkt at which vow
 * ext stats are embedded
 */
#endif
u_int8_t rs_isaggr; /* frame is part of an A-MPDU aggregate */
u_int8_t rs_isapsd; /* frame is a U-APSD trigger/delivery frame */
int16_t rs_noisefloor;
u_int16_t rs_channel;
#ifdef ATH_SUPPORT_TxBF
u_int32_t rs_rpttstamp;   /* txbf report time stamp*/
#endif
/* The following counts are meant to assist in stats calculation.
 * These variables are incremented only in specific situations, and
 * should not be relied upon for any purpose other than the original
 * stats related purpose they have been introduced for.
 */
u_int16_t rs_cryptodecapcount; /* Crypto bytes decapped/demic'ed. */
u_int8_t rs_padspace;          /* No. of padding bytes present after
                                  header in wbuf. */
u_int8_t rs_qosdecapcount;     /* QoS/HTC bytes decapped. */
/* End of stats calculation related counts. */
/*
 * uint8_t rs_lsig[IEEE80211_LSIG_LEN];
 * uint8_t rs_htsig[IEEE80211_HTSIG_LEN];
 * uint8_t rs_servicebytes[IEEE80211_SB_LEN];
 * uint8_t rs_fcs_error;
 */
/* cdp convergence monitor mode status */
union {
u_int8_t cdp_data[8];
u_int64_t cdp_tsf;
} cdp_rs_tstamp;
uint8_t cdp_rs_pream_type;  /* likely one of CDP_PKT_TYPE_* above - confirm */
uint32_t cdp_rs_user_rssi;
uint8_t cdp_rs_stbc;
uint8_t cdp_rs_sgi;         /* likely one of CDP_SGI_* above - confirm */
/* NOTE(review): "cdf_" prefix looks like a typo for "cdp_"; renaming
 * would break external users of this struct, so it is flagged only.
 */
uint32_t cdf_rs_rate_mcs;
uint32_t cdp_rs_reception_type; /* likely one of CDP_RX_TYPE_* - confirm */
uint32_t cdp_rs_bw;             /* likely one of CDP_FULL_RX_BW_* - confirm */
uint32_t cdp_rs_nss;
uint8_t cdp_rs_fcs_err;         /* FCS error seen on this frame */
};
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
*
* Permission to use, copy, modify, and/or distribute this software for
@@ -31,6 +31,7 @@
#include <cdp_txrx_stats_struct.h>
#endif
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
/******************************************************************************
*

View File

@@ -22,6 +22,7 @@
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rx_mon.h"
#define HTT_HTC_PKT_POOL_INIT_SIZE 64
@@ -305,7 +306,11 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
htt_ring_type = HTT_HW_TO_SW_RING;
break;
case RXDMA_DST:
case RXDMA_MONITOR_DESC:
htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
htt_ring_type = HTT_SW_TO_HW_RING;
break;
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Ring currently not supported\n", __func__);
@@ -465,7 +470,382 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
return 0;
return QDF_STATUS_SUCCESS;
fail1:
qdf_nbuf_free(htt_msg);
fail0:
return QDF_STATUS_E_FAILURE;
}
/*
* htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
* config message to target
* @htt_soc: HTT SOC handle
* @pdev_id: PDEV Id
* @hal_srng: Opaque HAL SRNG pointer
* @hal_ring_type: SRNG ring type
* @ring_buf_size: SRNG buffer size
* @htt_tlv_filter: Rx SRNG TLV and filter setting
* Return: 0 on success; error code on failure
*/
int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
int hal_ring_type, int ring_buf_size,
struct htt_rx_ring_tlv_filter *htt_tlv_filter)
{
struct htt_soc *soc = (struct htt_soc *)htt_soc;
struct dp_htt_htc_pkt *pkt;
qdf_nbuf_t htt_msg;
uint32_t *msg_word;
struct hal_srng_params srng_params;
uint32_t htt_ring_type, htt_ring_id;
uint32_t tlv_filter;
htt_msg = qdf_nbuf_alloc(soc->osdev,
HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
/* reserve room for the HTC header */
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
if (!htt_msg)
goto fail0;
hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
switch (hal_ring_type) {
case RXDMA_BUF:
#if QCA_HOST2FW_RXBUF_RING
htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
htt_ring_type = HTT_SW_TO_SW_RING;
#else
htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
htt_ring_type = HTT_SW_TO_HW_RING;
#endif
break;
case RXDMA_MONITOR_BUF:
htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
htt_ring_type = HTT_SW_TO_HW_RING;
break;
case RXDMA_MONITOR_STATUS:
htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
htt_ring_type = HTT_SW_TO_HW_RING;
break;
case RXDMA_MONITOR_DST:
htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
htt_ring_type = HTT_HW_TO_SW_RING;
break;
case RXDMA_MONITOR_DESC:
htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
htt_ring_type = HTT_SW_TO_HW_RING;
break;
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Ring currently not supported\n", __func__);
goto fail1;
}
/*
* Set the length of the message.
* The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
* separately during the below call to qdf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Failed to expand head for RX Ring Cfg msg\n",
__func__);
goto fail1; /* failure */
}
msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
/* rewind beyond alignment pad to get to the HTC header reserved area */
qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
/* word 0 */
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word, pdev_id);
/* TODO: Discuss with FW on changing this to unique ID and using
* htt_ring_type to send the type of ring
*/
HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
HTT_RX_RING_SELECTION_CFG_PKT_TLV_SET(*msg_word,
!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
/* word 1 */
msg_word++;
*msg_word = 0;
HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
ring_buf_size);
/* word 2 */
msg_word++;
*msg_word = 0;
if (htt_tlv_filter->enable_fp) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0001, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 0110, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 1000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
MGMT, 1001, 1);
}
if (htt_tlv_filter->enable_md) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0001, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 0110, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 1000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
MGMT, 1001, 1);
}
if (htt_tlv_filter->enable_mo) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0001, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 0110, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 1000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
MGMT, 1001, 1);
}
/* word 3 */
msg_word++;
*msg_word = 0;
if (htt_tlv_filter->enable_fp) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
MGMT, 1010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
MGMT, 1011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
MGMT, 1100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
MGMT, 1101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
MGMT, 1110, 1);
}
if (htt_tlv_filter->enable_md) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
MGMT, 1010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
MGMT, 1011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
MGMT, 1100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
MGMT, 1101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MD,
MGMT, 1110, 1);
}
if (htt_tlv_filter->enable_mo) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
MGMT, 1010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
MGMT, 1011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
MGMT, 1100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
MGMT, 1101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
MGMT, 1110, 1);
}
/* word 4 */
msg_word++;
*msg_word = 0;
if (htt_tlv_filter->enable_fp) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
CTRL, 0111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
CTRL, 1000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
CTRL, 1001, 1);
}
if (htt_tlv_filter->enable_md) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
CTRL, 0111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
CTRL, 1000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
CTRL, 1001, 1);
}
if (htt_tlv_filter->enable_mo) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
CTRL, 0111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
CTRL, 1000, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
CTRL, 1001, 1);
}
/* word 5 */
msg_word++;
*msg_word = 0;
if (htt_tlv_filter->enable_fp) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1110, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
CTRL, 1111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
DATA, MCAST, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
DATA, UCAST, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
DATA, NULL, 1);
}
if (htt_tlv_filter->enable_md) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1110, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
CTRL, 1111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
DATA, MCAST, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
DATA, UCAST, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
DATA, NULL, 1);
}
if (htt_tlv_filter->enable_mo) {
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1010, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1011, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1100, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1101, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1110, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
CTRL, 1111, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
DATA, MCAST, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
DATA, UCAST, 1);
htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
DATA, NULL, 1);
}
/* word 6 */
msg_word++;
*msg_word = 0;
tlv_filter = 0;
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
htt_tlv_filter->mpdu_start);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
htt_tlv_filter->msdu_start);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
htt_tlv_filter->packet);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
htt_tlv_filter->msdu_end);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
htt_tlv_filter->mpdu_end);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
htt_tlv_filter->packet_header);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
htt_tlv_filter->ppdu_end_status_done);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
htt_tlv_filter->ppdu_start);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
htt_tlv_filter->ppdu_end);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
htt_tlv_filter->ppdu_end_user_stats);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
PPDU_END_USER_STATS_EXT,
htt_tlv_filter->ppdu_end_user_stats_ext);
htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
htt_tlv_filter->ppdu_end_status_done);
HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
/* "response_required" field should be set if a HTT response message is
* required after setting up the ring.
*/
pkt = htt_htc_pkt_alloc(soc);
if (!pkt)
goto fail1;
pkt->soc_ctxt = NULL; /* not used during send-done callback */
SET_HTC_PACKET_INFO_TX(
&pkt->htc_pkt,
dp_htt_h2t_send_complete_free_netbuf,
qdf_nbuf_data(htt_msg),
qdf_nbuf_len(htt_msg),
soc->htc_endpoint,
1); /* tag - not relevant here */
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
return QDF_STATUS_SUCCESS;
fail1:
qdf_nbuf_free(htt_msg);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016 The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -81,6 +81,43 @@ struct htt_soc {
HTT_TX_MUTEX_TYPE htt_tx_mutex;
};
/**
* struct htt_rx_ring_tlv_filter - Rx ring TLV filter
* enable/disable.
* @mpdu_start: enable/disable MPDU start TLV
* @msdu_start: enable/disable MSDU start TLV
* @packet: enable/disable PACKET TLV
* @msdu_end: enable/disable MSDU end TLV
* @mpdu_end: enable/disable MPDU end TLV
* @packet_header: enable/disable PACKET header TLV
* @attention: enable/disable ATTENTION TLV
* @ppdu_start: enable/disable PPDU start TLV
* @ppdu_end: enable/disable PPDU end TLV
* @ppdu_end_user_stats: enable/disable PPDU user stats TLV
* @ppdu_end_user_stats_ext: enable/disable PPDU user stats ext TLV
* @ppdu_end_status_done: enable/disable PPDU end status done TLV
* @enable_fp: enable/disable FP packet
* @enable_md: enable/disable MD packet
* @enable_mo: enable/disable MO packet
*/
/* Rx ring TLV and packet-type filter configuration sent to the target
 * via htt_h2t_rx_ring_cfg(); each bit enables/disables one TLV or
 * packet class.
 */
struct htt_rx_ring_tlv_filter {
u_int32_t mpdu_start:1,             /* MPDU start TLV */
msdu_start:1,                       /* MSDU start TLV */
packet:1,                           /* PACKET TLV */
msdu_end:1,                         /* MSDU end TLV */
mpdu_end:1,                         /* MPDU end TLV */
packet_header:1,                    /* PACKET header TLV */
attention:1,                        /* ATTENTION TLV */
ppdu_start:1,                       /* PPDU start TLV */
ppdu_end:1,                         /* PPDU end TLV */
ppdu_end_user_stats:1,              /* PPDU end user stats TLV */
ppdu_end_user_stats_ext:1,          /* PPDU end user stats ext TLV */
ppdu_end_status_done:1,             /* PPDU end status done TLV */
enable_fp:1,                        /* FP (filter-pass) frames */
enable_md:1,                        /* MD (monitor-direct) frames */
enable_mo:1;                        /* MO (monitor-other) frames */
};
void *
htt_soc_attach(void *txrx_soc, void *osif_soc, HTC_HANDLE htc_soc,
void *hal_soc, qdf_device_t osdev);
@@ -92,4 +129,20 @@ int htt_srng_setup(void *htt_soc, int pdev_id, void *hal_srng,
int htt_soc_attach_target(void *htt_soc);
/*
* htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
* config message to target
* @htt_soc: HTT SOC handle
* @pdev_id: PDEV Id
* @hal_srng: Opaque HAL SRNG pointer
* @hal_ring_type: SRNG ring type
* @ring_buf_size: SRNG buffer size
* @htt_tlv_filter: Rx SRNG TLV and filter setting
*
* Return: 0 on success; error code on failure
*/
int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
int hal_ring_type, int ring_buf_size,
struct htt_rx_ring_tlv_filter *htt_tlv_filter);
#endif /* _DP_HTT_H_ */

View File

@@ -34,6 +34,7 @@
#include "cdp_txrx_cmn_struct.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#define DP_INTR_POLL_TIMER_MS 100
#define DP_MCS_LENGTH (6*MAX_MCS)
@@ -302,6 +303,15 @@ static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
if (reo_status_mask)
dp_reo_status_ring_handler(soc);
/* Process Rx monitor interrupts */
for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
work_done =
dp_mon_process(soc, ring, budget);
budget -= work_done;
}
}
budget_done:
return dp_budget - budget;
}
@@ -346,7 +356,7 @@ static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
soc->intr_ctx[i].tx_ring_mask = 0xF;
soc->intr_ctx[i].rx_ring_mask = 0xF;
soc->intr_ctx[i].rx_mon_ring_mask = 0xF;
soc->intr_ctx[i].rx_mon_ring_mask = 0x1;
soc->intr_ctx[i].rx_err_ring_mask = 0x1;
soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0x1;
soc->intr_ctx[i].reo_status_ring_mask = 0x1;
@@ -789,9 +799,10 @@ static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
#define REO_STATUS_RING_SIZE 32
#define RXDMA_BUF_RING_SIZE 1024
#define RXDMA_REFILL_RING_SIZE 2048
#define RXDMA_MONITOR_BUF_RING_SIZE 2048
#define RXDMA_MONITOR_DST_RING_SIZE 2048
#define RXDMA_MONITOR_STATUS_RING_SIZE 2048
#define RXDMA_MONITOR_BUF_RING_SIZE 1024
#define RXDMA_MONITOR_DST_RING_SIZE 1024
#define RXDMA_MONITOR_STATUS_RING_SIZE 1024
#define RXDMA_MONITOR_DESC_RING_SIZE 1024
/*
* dp_soc_cmn_setup() - Common SoC level initializion
@@ -1205,6 +1216,13 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
goto fail1;
}
if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"dp_srng_setup failed for rxdma_mon_desc_ring\n");
goto fail1;
}
/* Rx specific init */
if (dp_rx_pdev_attach(pdev)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1220,6 +1238,13 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
dp_lro_hash_setup(soc);
dp_dscp_tid_map_setup(pdev);
/* Rx monitor mode specific init */
if (dp_rx_pdev_mon_attach(pdev)) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"dp_rx_pdev_attach failed\n");
goto fail0;
}
return (struct cdp_pdev *)pdev;
fail1:
@@ -1278,6 +1303,8 @@ static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
dp_rx_pdev_detach(pdev);
dp_rx_pdev_mon_detach(pdev);
/* Setup per PDEV REO rings if configured */
if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
@@ -1295,6 +1322,9 @@ static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
RXDMA_MONITOR_STATUS, 0);
dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
RXDMA_MONITOR_DESC, 0);
soc->pdev_list[pdev->pdev_id] = NULL;
qdf_mem_free(pdev);
@@ -1481,6 +1511,19 @@ static void dp_rxdma_ring_config(struct dp_soc *soc)
if (pdev) {
htt_srng_setup(soc->htt_handle, i,
pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
htt_srng_setup(soc->htt_handle, i,
pdev->rxdma_mon_buf_ring.hal_srng,
RXDMA_MONITOR_BUF);
htt_srng_setup(soc->htt_handle, i,
pdev->rxdma_mon_dst_ring.hal_srng,
RXDMA_MONITOR_DST);
htt_srng_setup(soc->htt_handle, i,
pdev->rxdma_mon_status_ring.hal_srng,
RXDMA_MONITOR_STATUS);
htt_srng_setup(soc->htt_handle, i,
pdev->rxdma_mon_desc_ring.hal_srng,
RXDMA_MONITOR_DESC);
}
}
}
@@ -2043,6 +2086,85 @@ static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
}
/**
* dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
* @vdev_handle: Datapath VDEV handle
*
* Return: 0 on success, not 0 on failure
*/
static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle)
{
/* Many monitor VAPs can exists in a system but only one can be up at
* anytime
*/
struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
struct dp_pdev *pdev;
struct htt_rx_ring_tlv_filter htt_tlv_filter;
struct dp_soc *soc;
uint8_t pdev_id;
qdf_assert(vdev);
pdev = vdev->pdev;
pdev_id = pdev->pdev_id;
soc = pdev->soc;
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
"pdev=%p, pdev_id=%d, soc=%p vdev=%p\n",
pdev, pdev_id, soc, vdev);
/*Check if current pdev's monitor_vdev exists */
if (pdev->monitor_vdev) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"vdev=%p\n", vdev);
qdf_assert(vdev);
}
pdev->monitor_vdev = vdev;
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_start = 1;
htt_tlv_filter.packet = 1;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.mpdu_end = 1;
htt_tlv_filter.packet_header = 1;
htt_tlv_filter.attention = 1;
htt_tlv_filter.ppdu_start = 0;
htt_tlv_filter.ppdu_end = 0;
htt_tlv_filter.ppdu_end_user_stats = 0;
htt_tlv_filter.ppdu_end_user_stats_ext = 0;
htt_tlv_filter.ppdu_end_status_done = 0;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 0;
htt_tlv_filter.enable_mo = 1;
htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
pdev->rxdma_mon_dst_ring.hal_srng,
RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
htt_tlv_filter.mpdu_start = 1;
htt_tlv_filter.msdu_start = 1;
htt_tlv_filter.packet = 0;
htt_tlv_filter.msdu_end = 1;
htt_tlv_filter.mpdu_end = 1;
htt_tlv_filter.packet_header = 1;
htt_tlv_filter.attention = 1;
htt_tlv_filter.ppdu_start = 1;
htt_tlv_filter.ppdu_end = 1;
htt_tlv_filter.ppdu_end_user_stats = 1;
htt_tlv_filter.ppdu_end_user_stats_ext = 1;
htt_tlv_filter.ppdu_end_status_done = 1;
htt_tlv_filter.enable_fp = 1;
htt_tlv_filter.enable_md = 1;
htt_tlv_filter.enable_mo = 1;
/*
* htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
* pdev->rxdma_mon_status_ring.hal_srng,
* RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
*/
return QDF_STATUS_SUCCESS;
}
#ifdef MESH_MODE_SUPPORT
void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
@@ -2923,6 +3045,7 @@ static struct cdp_cmn_ops dp_ops_cmn = {
.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
.txrx_stats = dp_txrx_stats,
.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
/* TODO: Add other functions */
};
@@ -2942,7 +3065,13 @@ static struct cdp_me_ops dp_ops_me = {
};
static struct cdp_mon_ops dp_ops_mon = {
/* TODO */
.txrx_monitor_set_filter_ucast_data = NULL,
.txrx_monitor_set_filter_mcast_data = NULL,
.txrx_monitor_set_filter_non_data = NULL,
.txrx_monitor_get_filter_ucast_data = NULL,
.txrx_monitor_get_filter_mcast_data = NULL,
.txrx_monitor_get_filter_non_data = NULL,
.txrx_reset_monitor_mode = NULL,
};
static struct cdp_host_stats_ops dp_ops_host_stats = {

View File

@@ -35,13 +35,19 @@
*
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @dp_rxdma_srng: dp rxdma circular ring
* @rx_desc_pool: Poiter to free Rx descriptor pool
* @num_req_buffers: number of buffer to be replenished
* @desc_list: list of descs if called from dp_rx_process
* or NULL during dp rx initialization or out of buffer
* interrupt.
* @tail: tail of descs list
* @owner: who owns the nbuf (host, NSS etc...)
* Return: return success or failure
*/
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail,
@@ -57,9 +63,11 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
qdf_nbuf_t rx_netbuf;
void *rxdma_ring_entry;
union dp_rx_desc_list_elem_t *next;
struct dp_srng *dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
void *rxdma_srng = dp_rxdma_srng->hal_srng;
int32_t ret;
QDF_STATUS ret;
void *rxdma_srng;
rxdma_srng = dp_rxdma_srng->hal_srng;
if (!rxdma_srng) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -68,14 +76,16 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
return QDF_STATUS_E_FAILURE;
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"requested %d buffers for replenish", num_req_buffers);
/*
* if desc_list is NULL, allocate the descs from freelist
*/
if (!(*desc_list)) {
num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
rx_desc_pool,
num_req_buffers,
desc_list,
tail);
@@ -98,7 +108,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
rxdma_srng,
sync_hw_ptr);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"no of availble entries in rxdma ring: %d",
num_entries_avail);
@@ -144,6 +154,12 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
(*desc_list)->rx_desc.nbuf = rx_netbuf;
DP_STATS_INC_PKT(dp_pdev, replenished, 1,
qdf_nbuf_len(rx_netbuf));
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"rx_netbuf=%p, buf=%p, paddr=0x%llx, cookie=%d\n",
rx_netbuf, qdf_nbuf_data(rx_netbuf),
(unsigned long long)paddr, (*desc_list)->rx_desc.cookie);
hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
(*desc_list)->rx_desc.cookie,
owner);
@@ -153,18 +169,19 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"successfully replenished %d buffers", num_req_buffers);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%d rx desc added back to free list", num_desc_to_free);
DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
/*
* add any available free desc back to the free list
*/
if (*desc_list)
dp_rx_add_desc_list_to_free_list(dp_soc, desc_list,
tail, mac_id);
dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
mac_id, rx_desc_pool);
return QDF_STATUS_SUCCESS;
}
@@ -518,6 +535,9 @@ dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
uint16_t i, vdev_cnt = 0;
uint32_t ampdu_flag, amsdu_flag;
struct ether_header *eh;
struct dp_pdev *pdev;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
/* Debug -- Remove later */
qdf_assert(soc && hal_ring);
@@ -559,7 +579,7 @@ dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
qdf_assert(rx_desc);
rx_bufs_reaped[rx_desc->pool_id]++;
@@ -648,10 +668,13 @@ done:
if (!rx_bufs_reaped[mac_id])
continue;
dp_rx_buffers_replenish(soc, mac_id,
rx_bufs_reaped[mac_id],
&head[mac_id],
&tail[mac_id],
pdev = soc->pdev_list[mac_id];
dp_rxdma_srng = &pdev->rx_refill_buf_ring;
rx_desc_pool = &soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
rx_desc_pool, rx_bufs_reaped[mac_id],
&head[mac_id], &tail[mac_id],
HAL_RX_BUF_RBM_SW3_BM);
}
@@ -849,7 +872,7 @@ done:
/**
* dp_rx_detach() - detach dp rx
* @soc: core txrx main context
* @pdev: core txrx pdev context
*
* This function will detach DP RX into main device context
* will free DP Rx resources.
@@ -861,8 +884,11 @@ dp_rx_pdev_detach(struct dp_pdev *pdev)
{
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
struct rx_desc_pool *rx_desc_pool;
dp_rx_desc_pool_free(soc, pdev_id);
rx_desc_pool = &soc->rx_desc_buf[pdev_id];
dp_rx_desc_pool_free(soc, pdev_id, rx_desc_pool);
qdf_spinlock_destroy(&soc->rx_desc_mutex[pdev_id]);
return;
@@ -870,7 +896,7 @@ dp_rx_pdev_detach(struct dp_pdev *pdev)
/**
* dp_rx_attach() - attach DP RX
* @soc: core txrx main context
* @pdev: core txrx pdev context
*
* This function will attach a DP RX instance into the main
* device (SOC) context. Will allocate dp rx resource and
@@ -888,6 +914,8 @@ dp_rx_pdev_attach(struct dp_pdev *pdev)
uint32_t rxdma_entries;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
qdf_spinlock_create(&soc->rx_desc_mutex[pdev_id]);
pdev = soc->pdev_list[pdev_id];
@@ -895,11 +923,14 @@ dp_rx_pdev_attach(struct dp_pdev *pdev)
rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize(
soc->hal_soc, RXDMA_BUF);
dp_rx_desc_pool_alloc(soc, pdev_id);
rx_desc_pool = &soc->rx_desc_buf[pdev_id];
dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries*3, rx_desc_pool);
/* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
dp_rx_buffers_replenish(soc, pdev_id, rxdma_entries,
&desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
dp_rxdma_srng = &pdev->rx_refill_buf_ring;
dp_rx_buffers_replenish(soc, pdev_id, dp_rxdma_srng, rx_desc_pool,
rxdma_entries, &desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
return QDF_STATUS_SUCCESS;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017 The Linux Foundation. All rights reserved.
* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -92,51 +92,94 @@ union dp_rx_desc_list_elem_t {
};
/**
* dp_rx_cookie_2_va() - Converts cookie to a virtual address of
* the Rx descriptor.
* dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
* the Rx descriptor on Rx DMA source ring buffer
* @soc: core txrx main context
* @cookie: cookie used to lookup virtual address
*
* Return: void *: Virtual Address of the Rx descriptor
*/
static inline
void *dp_rx_cookie_2_va(struct dp_soc *soc, uint32_t cookie)
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
/* TODO */
/* Add sanity for pool_id & index */
return &(soc->rx_desc[pool_id].array[index].rx_desc);
return &(soc->rx_desc_buf[pool_id].array[index].rx_desc);
}
/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t idx = DP_RX_DESC_COOKIE_INDEX_GET(cookie);

	/* TODO: add sanity checks for pool and idx before indexing */
	return &soc->rx_desc_mon[pool].array[idx].rx_desc;
}
/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring
 *				    buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t idx = DP_RX_DESC_COOKIE_INDEX_GET(cookie);

	/* TODO: add sanity checks for pool and idx before indexing */
	return &soc->rx_desc_status[pool].array[idx].rx_desc;
}
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
union dp_rx_desc_list_elem_t **local_desc_list,
union dp_rx_desc_list_elem_t **tail,
uint16_t pool_id);
uint16_t pool_id,
struct rx_desc_pool *rx_desc_pool);
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
struct rx_desc_pool *rx_desc_pool,
uint16_t num_descs,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id);
void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id);
QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
void dp_rx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail,
uint8_t owner);
uint32_t dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
uint32_t
dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
uint32_t pool_id,
uint32_t pool_size,
struct rx_desc_pool *rx_desc_pool);
void dp_rx_desc_pool_free(struct dp_soc *soc,
uint32_t pool_id,
struct rx_desc_pool *rx_desc_pool);
/**
* dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
*
@@ -294,5 +337,97 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
return QDF_STATUS_E_FAILURE;
}
#endif
/**
* dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
* the MSDU Link Descriptor
* @soc: core txrx main context
* @buf_info: buf_info include cookie that used to lookup virtual address of
* link descriptor Normally this is just an index into a per SOC array.
*
* This is the VA of the link descriptor, that HAL layer later uses to
* retrieve the list of MSDU's for a given MPDU.
*
* Return: void *: Virtual Address of the Rx descriptor
*/
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
struct hal_buf_info *buf_info)
{
void *link_desc_va;
/* TODO */
/* Add sanity for cookie */
link_desc_va = soc->link_desc_banks[buf_info->sw_cookie].base_vaddr +
(buf_info->paddr -
soc->link_desc_banks[buf_info->sw_cookie].base_paddr);
return link_desc_va;
}
/**
* dp_rx_cookie_2_mon_link_desc_va() - Converts cookie to a virtual address of
* the MSDU Link Descriptor
* @pdev: core txrx pdev context
* @buf_info: buf_info includes cookie that used to lookup virtual address of
* link descriptor. Normally this is just an index into a per pdev array.
*
* This is the VA of the link descriptor in monitor mode destination ring,
* that HAL layer later uses to retrieve the list of MSDU's for a given MPDU.
*
* Return: void *: Virtual Address of the Rx descriptor
*/
static inline
void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
struct hal_buf_info *buf_info)
{
void *link_desc_va;
/* TODO */
/* Add sanity for cookie */
link_desc_va = pdev->link_desc_banks[buf_info->sw_cookie].base_vaddr +
(buf_info->paddr -
pdev->link_desc_banks[buf_info->sw_cookie].base_paddr);
return link_desc_va;
}
/*
* dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
* called during dp rx initialization
* and at the end of dp_rx_process.
*
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @dp_rxdma_srng: dp rxdma circular ring
* @rx_desc_pool: Pointer to free Rx descriptor pool
* @num_req_buffers: number of buffer to be replenished
* @desc_list: list of descs if called from dp_rx_process
* or NULL during dp rx initialization or out of buffer
* interrupt.
* @tail: tail of descs list
* @owner: who owns the nbuf (host, NSS etc...)
* Return: return success or failure
*/
QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail,
uint8_t owner);
/**
* dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
* (WBM), following error handling
*
* @soc: core DP main context
* @buf_addr_info: opaque pointer to the REO error ring descriptor
* @buf_addr_info: void pointer to the buffer_addr_info
* Return: QDF_STATUS
*/
QDF_STATUS
dp_rx_link_desc_buf_return(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
void *buf_addr_info);
#endif /* _DP_RX_H */

View File

@@ -25,43 +25,41 @@
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @pool_size: number of Rx descriptor in the pool
* @rx_desc_pool: rx descriptor pool pointer
*
* return success or failure
*/
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id)
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
uint32_t i;
struct dp_pdev *dp_pdev = soc->pdev_list[pool_id];
struct dp_srng *rxdma_srng = &dp_pdev->rx_refill_buf_ring;
soc->rx_desc[pool_id].array = qdf_mem_malloc(
((rxdma_srng->alloc_size/hal_srng_get_entrysize(soc->hal_soc,
RXDMA_BUF)) * 3) * sizeof(union dp_rx_desc_list_elem_t));
rx_desc_pool->array =
qdf_mem_malloc(pool_size*sizeof(union dp_rx_desc_list_elem_t));
if (!(soc->rx_desc[pool_id].array)) {
if (!(rx_desc_pool->array)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s: RX Desc Pool[%d] allocation failed\n",
__func__, pool_id);
return QDF_STATUS_E_NOMEM;
}
qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
soc->rx_desc[pool_id].pool_size = (rxdma_srng->alloc_size/
hal_srng_get_entrysize(soc->hal_soc, RXDMA_BUF)) * 3;
rx_desc_pool->pool_size = pool_size;
/* link SW rx descs into a freelist */
soc->rx_desc[pool_id].freelist = &soc->rx_desc[pool_id].array[0];
for (i = 0; i < soc->rx_desc[pool_id].pool_size-1; i++) {
soc->rx_desc[pool_id].array[i].next =
&soc->rx_desc[pool_id].array[i+1];
soc->rx_desc[pool_id].array[i].rx_desc.cookie =
i | (pool_id << 18);
soc->rx_desc[pool_id].array[i].rx_desc.pool_id = pool_id;
rx_desc_pool->freelist = &rx_desc_pool->array[0];
for (i = 0; i < rx_desc_pool->pool_size-1; i++) {
rx_desc_pool->array[i].next = &rx_desc_pool->array[i+1];
rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
}
soc->rx_desc[pool_id].array[i].next = NULL;
soc->rx_desc[pool_id].array[i].rx_desc.cookie = i | (pool_id << 18);
soc->rx_desc[pool_id].array[i].rx_desc.pool_id = pool_id;
rx_desc_pool->array[i].next = NULL;
rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
return QDF_STATUS_SUCCESS;
}
@@ -72,11 +70,14 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id)
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @rx_desc_pool: rx descriptor pool pointer
*/
void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id)
void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id,
struct rx_desc_pool *rx_desc_pool)
{
qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
qdf_mem_free(soc->rx_desc[pool_id].array);
qdf_mem_free(rx_desc_pool->array);
qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
}
@@ -86,12 +87,15 @@ void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id)
*
* @soc: core txrx main context
* @pool_id: pool_id which is one of 3 mac_ids
* @rx_desc_pool: rx descriptor pool pointer
* @num_descs: number of descs requested from freelist
* @desc_list: attach the descs to this list (output parameter)
* @tail: attach the point to last desc of free list (output parameter)
*
* Return: number of descs allocated from free list.
*/
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
struct rx_desc_pool *rx_desc_pool,
uint16_t num_descs,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail)
@@ -99,7 +103,8 @@ uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
uint16_t count;
qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
*desc_list = soc->rx_desc[pool_id].freelist;
*desc_list = rx_desc_pool->freelist;
if (!(*desc_list)) {
qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
@@ -108,10 +113,9 @@ uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
for (count = 0; count < num_descs; count++) {
*tail = soc->rx_desc[pool_id].freelist;
soc->rx_desc[pool_id].freelist =
soc->rx_desc[pool_id].freelist->next;
if (qdf_unlikely(!soc->rx_desc[pool_id].freelist)) {
*tail = rx_desc_pool->freelist;
rx_desc_pool->freelist = rx_desc_pool->freelist->next;
if (qdf_unlikely(!rx_desc_pool->freelist)) {
qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
return count;
}
@@ -125,24 +129,27 @@ uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
* freelist.
*
* @soc: core txrx main context
* @local_desc_list: local desc list provided by the caller (output param)
* @local_desc_list: local desc list provided by the caller
* @tail: attach the point to last desc of local desc list
* @pool_id: pool_id which is one of 3 mac_ids
* @rx_desc_pool: rx descriptor pool pointer
*/
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
union dp_rx_desc_list_elem_t **local_desc_list,
union dp_rx_desc_list_elem_t **tail,
uint16_t pool_id)
uint16_t pool_id,
struct rx_desc_pool *rx_desc_pool)
{
union dp_rx_desc_list_elem_t *temp_list = NULL;
qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
temp_list = soc->rx_desc[pool_id].freelist;
temp_list = rx_desc_pool->freelist;
qdf_print(
"temp_list: %p, *local_desc_list: %p, *tail: %p (*tail)->next: %p\n",
temp_list, *local_desc_list, *tail, (*tail)->next);
soc->rx_desc[pool_id].freelist = *local_desc_list;
rx_desc_pool->freelist = *local_desc_list;
(*tail)->next = temp_list;
qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);

View File

@@ -29,33 +29,6 @@
#include <ieee80211.h>
#endif
/**
* dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
* the MSDU Link Descriptor
* @soc: core txrx main context
* @cookie: cookie used to lookup virtual address of link descriptor
* Normally this is just an index into a per SOC array.
*
* This is the VA of the link descriptor, that HAL layer later uses to
* retrieve the list of MSDU's for a given MPDU.
*
* Return: void *: Virtual Address of the Rx descriptor
*/
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
struct hal_buf_info *buf_info)
{
void *link_desc_va;
/* TODO */
/* Add sanity for cookie */
link_desc_va = soc->link_desc_banks[buf_info->sw_cookie].base_vaddr +
(buf_info->paddr -
soc->link_desc_banks[buf_info->sw_cookie].base_paddr);
return link_desc_va;
}
/**
* dp_rx_frag_handle() - Handles fragmented Rx frames
@@ -128,7 +101,8 @@ dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
for (i = 0; (i < HAL_RX_NUM_MSDU_DESC) && quota--; i++) {
struct dp_rx_desc *rx_desc =
dp_rx_cookie_2_va(soc, msdu_list.sw_cookie[i]);
dp_rx_cookie_2_va_rxdma_buf(soc,
msdu_list.sw_cookie[i]);
qdf_assert(rx_desc);
@@ -470,6 +444,9 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
uint8_t error, rbm;
struct hal_rx_mpdu_desc_info mpdu_desc_info;
struct hal_buf_info hbi;
struct dp_pdev *dp_pdev;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
/* Debug -- Remove later */
qdf_assert(soc && hal_ring);
@@ -547,7 +524,6 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
&head, &tail, quota);
continue;
}
/* Return link descriptor through WBM ring (SW2WBM)*/
dp_rx_link_desc_return(soc, ring_desc);
}
@@ -555,10 +531,16 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
done:
hal_srng_access_end(hal_soc, hal_ring);
/* Assume MAC id = 0, owner = 0 */
if (rx_bufs_used)
dp_rx_buffers_replenish(soc, 0, rx_bufs_used, &head, &tail,
HAL_RX_BUF_RBM_SW3_BM);
if (rx_bufs_used) {
dp_pdev = soc->pdev_list[0];
dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
rx_desc_pool = &soc->rx_desc_buf[0];
dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
rx_bufs_used, &head, &tail, HAL_RX_BUF_RBM_SW3_BM);
}
return rx_bufs_used; /* Assume no scale factor for now */
}
@@ -589,6 +571,9 @@ dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
uint8_t wbm_err_src;
uint32_t rx_buf_cookie;
uint8_t mac_id;
struct dp_pdev *dp_pdev;
struct dp_srng *dp_rxdma_srng;
struct rx_desc_pool *rx_desc_pool;
/* Debug -- Remove later */
qdf_assert(soc && hal_ring);
@@ -636,7 +621,7 @@ dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
qdf_assert(rx_desc);
/* XXX */
@@ -707,7 +692,7 @@ dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
} else {
/* Should not come here */
rx_buf_cookie = HAL_RX_WBM_BUF_COOKIE_GET(ring_desc);
rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);
rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
qdf_assert(rx_desc);
@@ -739,11 +724,14 @@ dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
done:
hal_srng_access_end(hal_soc, hal_ring);
/* Assume MAC id = 0, owner = 0 */
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
if (rx_bufs_used[mac_id]) {
dp_rx_buffers_replenish(soc, mac_id,
rx_bufs_used[mac_id],
dp_pdev = soc->pdev_list[mac_id];
dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
rx_desc_pool = &soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
rx_desc_pool, rx_bufs_used[mac_id],
&head[mac_id], &tail[mac_id],
HAL_RX_BUF_RBM_SW3_BM);
rx_bufs_reaped += rx_bufs_used[mac_id];

49
dp/wifi3.0/dp_rx_mon.h Normal file
View File

@@ -0,0 +1,49 @@
/*
* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _DP_RX_MON_H_
#define _DP_RX_MON_H_
#ifdef CONFIG_MCL
#include <cds_ieee80211_defines.h>
#endif
/**
* dp_rx_mon_dest_process() - Brain of the Rx processing functionality
* Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
* @soc: core txrx main context
* @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
*
* This function implements the core of Rx functionality. This is
* expected to handle only non-error frames.
*
* Return: uint32_t: No. of elements processed
*/
void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id,
uint32_t quota);
QDF_STATUS dp_rx_pdev_mon_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_detach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev);
QDF_STATUS dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev);
uint32_t dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota);
#endif

1014
dp/wifi3.0/dp_rx_mon_dest.c Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,479 @@
/*
* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "ieee80211.h"
#include "dp_rx_mon.h"
/**
 * dp_rx_mon_status_process_tlv() - Process status TLVs in status
 * buffers on the Rx status queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids, selects the pdev/ring
 *
 * Walks the TLV chain inside each queued status buffer until a
 * PPDU-done TLV is seen or the buffer is exhausted; frees each
 * buffer after parsing.
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id) {
	struct dp_pdev *pdev = soc->pdev_list[mac_id];
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status;

#ifdef DP_INTR_POLL_BASED
	/* In poll mode the pdev may not be attached yet */
	if (!pdev)
		return;
#endif

	ppdu_info = &pdev->ppdu_info;

	/* Only parse while a PPDU is in progress for this pdev */
	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {

		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);
		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;

		/* Parse TLVs until PPDU-done or end of the buffer; the
		 * RX_BUFFER_SIZE bound prevents walking past the nbuf data.
		 */
		do {
			tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
					ppdu_info);
			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

			if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
				break;

		} while (tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE);

		/* Buffer fully parsed; ownership ends here */
		qdf_nbuf_free(status_nbuf);

		if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			/* Temporary: immediately re-arm for the next PPDU
			 * since DONE handling is not implemented yet.
			 */
			pdev->mon_ppdu_status =
				DP_PPDU_STATUS_START;
			break;
		}
	}
	return;
}
/*
 * dp_rx_mon_status_srng_process() - Process monitor status ring
 *	post the status ring buffer to Rx status Queue for later
 *	processing when status ring is filled with status TLV.
 *	Allocate a new buffer to status ring if the filled buffer
 *	is posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that were processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
	uint32_t quota)
{
	struct dp_pdev *pdev = soc->pdev_list[mac_id];
	void *hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	uint32_t work_done = 0;

#ifdef DP_INTR_POLL_BASED
	/* In poll mode the pdev may not be attached yet */
	if (!pdev)
		return work_done;
#endif

	mon_status_srng = pdev->rxdma_mon_status_ring.hal_srng;

	qdf_assert(mon_status_srng);

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;

		/* Reassemble the 40-bit DMA address from the ring entry */
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {
			/* Entry holds a previously-posted buffer: if HW has
			 * marked it done, move it to the status queue.
			 */
			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
				rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
				QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			/* HW not finished with this buffer yet: stop here
			 * and retry on the next invocation.
			 */
			if (status != QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_WARN,
				"[%s][%d] status not done",
				__func__, __LINE__);
				break;
			}
			qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
				QDF_DMA_FROM_DEVICE);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

		} else {
			/* Empty entry: pull a fresh descriptor from the
			 * status descriptor free list.
			 */
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;
			uint32_t num_alloc_desc;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
							rx_desc_pool,
							1,
							&desc_list,
							&tail);

			/* NOTE(review): num_alloc_desc is not checked; if the
			 * freelist is empty, desc_list stays NULL and the
			 * dereference below crashes — confirm and guard.
			 */
			rx_desc = &desc_list->rx_desc;
		}

		/* Allocate a new skb */
		/* NOTE(review): qdf_nbuf_alloc return is not checked; a
		 * failed allocation would crash in qdf_nbuf_data below.
		 */
		status_nbuf = qdf_nbuf_alloc(pdev->osif_pdev, RX_BUFFER_SIZE,
			RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT, FALSE);

		status_buf = qdf_nbuf_data(status_nbuf);

		hal_clear_rx_status_done(status_buf);

		qdf_nbuf_map_single(soc->osdev, status_nbuf,
			QDF_DMA_BIDIRECTIONAL);
		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;

		/* Re-post the entry with the fresh buffer */
		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
			paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);

		rxdma_mon_status_ring_entry =
			hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	hal_srng_access_end(hal_soc, mon_status_srng);

	return work_done;

}
/*
 * dp_rx_mon_status_process() - Process monitor status ring and
 *	the TLVs queued from it.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 * Return: uint32_t: No. of ring entries that were processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	uint32_t num_reaped;

	/* Drain the status SRNG first, then parse the queued TLV buffers */
	num_reaped = dp_rx_mon_status_srng_process(soc, mac_id, quota);
	dp_rx_mon_status_process_tlv(soc, mac_id);

	return num_reaped;
}
/**
 * dp_mon_process() - Main monitor mode processing routine.
 *	Runs monitor status ring processing followed by monitor
 *	destination ring processing.
 *	Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of status ring entries that can be serviced in one shot.
 * Return: uint32_t: No. of status ring entries that were processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota) {
	uint32_t status_ring_done;

	status_ring_done = dp_rx_mon_status_process(soc, mac_id, quota);
	dp_rx_mon_dest_process(soc, mac_id, quota);

	return status_ring_done;
}
/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx status ring
 * @pdev: core txrx pdev context
 *
 * This function will detach the DP RX monitor status ring from the
 * main device context and free the DP Rx descriptor pool backing it.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint8_t pdev_id = pdev->pdev_id;

	/* Release the per-pdev status descriptor pool */
	dp_rx_desc_pool_free(soc, pdev_id, &soc->rx_desc_status[pdev_id]);

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_mon_status_buffers_replenish() - replenish monitor status ring with
 *	rx nbufs, called during dp rx monitor status ring initialization
 *	and from status ring processing.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       process or NULL during dp rx initialization or
 *	       out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
	uint32_t mac_id,
	struct dp_srng *dp_rxdma_srng,
	struct rx_desc_pool *rx_desc_pool,
	uint32_t num_req_buffers,
	union dp_rx_desc_list_elem_t **desc_list,
	union dp_rx_desc_list_elem_t **tail,
	uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	uint8_t *status_buf;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"[%s][%d] requested %d buffers for replenish\n",
		__func__, __LINE__, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"[%s][%d] no free rx_descs in freelist\n",
				__func__, __LINE__);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"[%s][%d] %d rx desc allocated\n", __func__, __LINE__,
			num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
				rxdma_srng, sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"[%s][%d] no of available entries in rxdma ring: %d\n",
			__func__, __LINE__, num_entries_avail);

	/* Replenish at most as many entries as the ring can take */
	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	for (count = 0; count < num_req_buffers; count++) {
		/* Allocate the buffer BEFORE consuming a ring entry, so a
		 * failed allocation does not leave an unfilled entry behind.
		 */
		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
			RX_BUFFER_SIZE,
			RX_BUFFER_RESERVATION,
			RX_BUFFER_ALIGNMENT,
			FALSE);

		/* On allocation failure stop replenishing; the descriptors
		 * that were not consumed are returned to the free list below.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"[%s][%d] nbuf alloc failed, replenished %d\n",
				__func__, __LINE__, count);
			break;
		}

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
			rxdma_srng);

		/* Zero first, then clear the HW status-done flag so the
		 * memset cannot undo what hal_clear_rx_status_done() wrote.
		 */
		status_buf = qdf_nbuf_data(rx_netbuf);
		memset(status_buf, 0, RX_BUFFER_SIZE);
		hal_clear_rx_status_done(status_buf);

		qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
			QDF_DMA_BIDIRECTIONAL);

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
			(*desc_list)->rx_desc.cookie, owner);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"[%s][%d] rx_desc=%p, cookie=%d, nbuf=%p, \
			status_buf=%p paddr=%p\n",
			__func__, __LINE__, &(*desc_list)->rx_desc,
			(*desc_list)->rx_desc.cookie, rx_netbuf,
			status_buf, (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"successfully replenished %d buffers\n", num_req_buffers);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"%d rx desc added back to free list\n", num_desc_to_free);

	/*
	 * add any available free desc back to the free list; only
	 * dereference *desc_list when it is non-NULL (it is NULL when
	 * every allocated descriptor was consumed above).
	 */
	if (*desc_list) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"[%s][%d] desc_list=%p, tail=%p rx_desc=%p, cookie=%d\n",
			__func__, __LINE__, desc_list, tail,
			&(*desc_list)->rx_desc,
			(*desc_list)->rx_desc.cookie);
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
			mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
/**
* dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
* @pdev: core txrx pdev context
*
* This function will attach a DP RX monitor status ring into pDEV
* and replenish monitor status ring with buffer.
*
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev) {
uint8_t pdev_id = pdev->pdev_id;
struct dp_soc *soc = pdev->soc;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
struct dp_srng *rxdma_srng;
uint32_t rxdma_entries;
struct rx_desc_pool *rx_desc_pool;
rxdma_srng = &pdev->rxdma_mon_status_ring;
rxdma_entries = rxdma_srng->alloc_size/hal_srng_get_entrysize(
soc->hal_soc, RXDMA_MONITOR_STATUS);
rx_desc_pool = &soc->rx_desc_status[pdev_id];
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
"%s: Mon RX Status Pool[%d] allocation size=%d\n",
__func__, pdev_id, rxdma_entries);
dp_rx_desc_pool_alloc(soc, pdev_id, rxdma_entries+1, rx_desc_pool);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
"%s: Mon RX Status Buffers Replenish pdev_id=%d\n",
__func__, pdev_id);
dp_rx_mon_status_buffers_replenish(soc, pdev_id, rxdma_srng,
rx_desc_pool, rxdma_entries, &desc_list, &tail,
HAL_RX_BUF_RBM_SW3_BM);
qdf_nbuf_queue_init(&pdev->rx_status_q);
pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
return QDF_STATUS_SUCCESS;
}

View File

@@ -37,6 +37,8 @@
#include <hal_reo.h>
#include "wlan_cfg.h"
#include "hal_rx.h"
#include <hal_api.h>
#include <hal_api_mon.h>
#define MAX_TCL_RING 3
#define MAX_RXDMA_ERRORS 32
@@ -55,6 +57,7 @@
#define DP_FC0_SUBTYPE_QOS 0x80
#define DP_QOS_TID 0x0f
#define DP_IPV6_PRIORITY_SHIFT 20
#define MAX_MON_LINK_DESC_BANKS 2
#if defined(CONFIG_MCL)
#define MAX_PDEV_CNT 1
@@ -132,6 +135,18 @@ enum dp_tx_frm_type {
dp_tx_frm_raw,
};
/**
 * struct rx_desc_pool - software RX descriptor pool, backing one RXDMA
 * buffer/monitor/status ring
 * @pool_size: number of RX descriptor in the pool
 * @array: pointer to array of RX descriptor
 * @freelist: pointer to free RX descriptor link list
 */
struct rx_desc_pool {
	uint32_t pool_size;
	union dp_rx_desc_list_elem_t *array;
	union dp_rx_desc_list_elem_t *freelist;
};
/**
* struct dp_tx_ext_desc_elem_s
* @next: next extension descriptor pointer
@@ -489,12 +504,14 @@ struct dp_soc {
/* Tx H/W queues lock */
qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
/* Rx SW descriptor pool */
struct {
uint32_t pool_size;
union dp_rx_desc_list_elem_t *array;
union dp_rx_desc_list_elem_t *freelist;
} rx_desc[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA buffer */
struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA monitor buffer */
struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
/* Rx SW descriptor pool for RXDMA status buffer */
struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
/* DP rx desc lock */
DP_MUTEX_TYPE rx_desc_mutex[MAX_RXDESC_POOLS];
@@ -681,6 +698,18 @@ struct dp_pdev {
/* RXDMA monitor status ring. TBD: Check format of this ring */
struct dp_srng rxdma_mon_status_ring;
struct dp_srng rxdma_mon_desc_ring;
/* Link descriptor memory banks */
struct {
void *base_vaddr_unaligned;
void *base_vaddr;
qdf_dma_addr_t base_paddr_unaligned;
qdf_dma_addr_t base_paddr;
uint32_t size;
} link_desc_banks[MAX_MON_LINK_DESC_BANKS];
/**
* TODO: See if we need a ring map here for LMAC rings.
* 1. Monitor rings are currently planning to be processed on receiving
@@ -738,6 +767,13 @@ struct dp_pdev {
/* dscp_tid_map_*/
uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
struct hal_rx_ppdu_info ppdu_info;
qdf_nbuf_queue_t rx_status_q;
uint32_t mon_ppdu_id;
uint32_t mon_ppdu_status;
struct cdp_mon_status rx_mon_recv_status;
/* TBD */
};

View File

@@ -1421,10 +1421,17 @@ static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
int i;
*num_msdus = 0;
if (*num_msdus > HAL_RX_NUM_MSDU_DESC)
*num_msdus = HAL_RX_NUM_MSDU_DESC;
msdu_details = HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link);
for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"[%s][%d] msdu_link=%p msdu_details=%p\n",
__func__, __LINE__, msdu_link, msdu_details);
for (i = 0; i < *num_msdus; i++) {
msdu_desc_info = HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[i]);
msdu_list->msdu_info[i].msdu_flags =
HAL_RX_MSDU_FLAGS_GET(msdu_desc_info);
@@ -1433,8 +1440,12 @@ static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
msdu_list->sw_cookie[i] =
HAL_RX_BUF_COOKIE_GET(
&msdu_details[i].buffer_addr_info_details);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"[%s][%d] i=%d sw_cookie=%d\n",
__func__, __LINE__, i, msdu_list->sw_cookie[i]);
}
*num_msdus = i;
}
/**
@@ -1498,9 +1509,9 @@ enum hal_rx_reo_buf_type {
* @ HAL_REO_ERR_BAR_FRAME_SN_EQUALS_SSN : A bar received with SSN equal to SN
* @ HAL_REO_ERR_PN_CHECK_FAILED : PN Check Failed packet
* @ HAL_REO_ERR_2K_ERROR_HANDLING_FLAG_SET : Frame is forwarded as a result
* of the 'Seq_2k_error_detected_flag' been set in the REO Queue descriptor
* of the Seq_2k_error_detected_flag been set in the REO Queue descriptor
* @ HAL_REO_ERR_PN_ERROR_HANDLING_FLAG_SET : Frame is forwarded as a result
* of the 'pn_error_detected_flag' been set in the REO Queue descriptor
* of the pn_error_detected_flag been set in the REO Queue descriptor
* @ HAL_REO_ERR_QUEUE_DESC_BLOCKED_SET : Frame is forwarded as a result of
* the queue descriptor(address) being blocked as SW/FW seems to be currently
* in the process of making updates to this descriptor

View File

@@ -80,6 +80,7 @@ enum hal_ring_type {
RXDMA_MONITOR_BUF,
RXDMA_MONITOR_STATUS,
RXDMA_MONITOR_DST,
RXDMA_MONITOR_DESC,
MAX_RING_TYPES
};
@@ -535,10 +536,11 @@ static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
/* For LMAC rings, ring pointer updates are done through FW and
* hence written to a shared memory location that is read by FW
*/
if (srng->ring_dir == HAL_SRNG_SRC_RING)
if (srng->ring_dir == HAL_SRNG_SRC_RING) {
*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
else
*(srng->u.src_ring.tp_addr) = srng->u.dst_ring.tp;
} else {
*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
}
} else {
if (srng->ring_dir == HAL_SRNG_SRC_RING)
hif_write32_mb(srng->u.src_ring.hp_addr,

500
hal/wifi3.0/hal_api_mon.h Normal file
View File

@@ -0,0 +1,500 @@
/*
* Copyright (c) 2017 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _HAL_API_MON_H_
#define _HAL_API_MON_H_
#include "qdf_types.h"
#include "hal_internal.h"
#define HAL_RX_OFFSET(block, field) block##_##field##_OFFSET
#define HAL_RX_LSB(block, field) block##_##field##_LSB
#define HAL_RX_MASk(block, field) block##_##field##_MASK
#define HAL_RX_GET(_ptr, block, field) \
(((*((volatile uint32_t *)_ptr + (HAL_RX_OFFSET(block, field)>>2))) & \
HAL_RX_MASk(block, field)) >> \
HAL_RX_LSB(block, field))
#define HAL_RX_PHY_DATA_RADAR 0x01
#define HAL_RX_FCS_LEN (4)
#define KEY_EXTIV 0x20
#define HAL_RX_USER_TLV32_TYPE_OFFSET 0x00000000
#define HAL_RX_USER_TLV32_TYPE_LSB 1
#define HAL_RX_USER_TLV32_TYPE_MASK 0x000003FE
#define HAL_RX_USER_TLV32_LEN_OFFSET 0x00000000
#define HAL_RX_USER_TLV32_LEN_LSB 10
#define HAL_RX_USER_TLV32_LEN_MASK 0x003FFC00
#define HAL_RX_USER_TLV32_USERID_OFFSET 0x00000000
#define HAL_RX_USER_TLV32_USERID_LSB 26
#define HAL_RX_USER_TLV32_USERID_MASK 0xFC000000
#define HAL_ALIGN(x, a) HAL_ALIGN_MASK(x, (a)-1)
#define HAL_ALIGN_MASK(x, mask) (typeof(x))(((uint32)(x) + (mask)) & ~(mask))
#define HAL_RX_TLV32_HDR_SIZE 4
#define HAL_RX_GET_USER_TLV32_TYPE(rx_status_tlv_ptr) \
((*((uint32_t *)(rx_status_tlv_ptr)) & \
HAL_RX_USER_TLV32_TYPE_MASK) >> \
HAL_RX_USER_TLV32_TYPE_LSB)
#define HAL_RX_GET_USER_TLV32_LEN(rx_status_tlv_ptr) \
((*((uint32_t *)(rx_status_tlv_ptr)) & \
HAL_RX_USER_TLV32_LEN_MASK) >> \
HAL_RX_USER_TLV32_LEN_LSB)
#define HAL_RX_GET_USER_TLV32_USERID(rx_status_tlv_ptr) \
((*((uint32_t *)(rx_status_tlv_ptr)) & \
HAL_RX_USER_TLV32_USERID_MASK) >> \
HAL_RX_USER_TLV32_USERID_LSB)
#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
#define HAL_TLV_STATUS_PPDU_DONE 1
#define HAL_TLV_STATUS_DUMMY 2
#define HAL_MAX_UL_MU_USERS 8
enum {
HAL_HW_RX_DECAP_FORMAT_RAW = 0,
HAL_HW_RX_DECAP_FORMAT_NWIFI,
HAL_HW_RX_DECAP_FORMAT_ETH2,
HAL_HW_RX_DECAP_FORMAT_8023,
};
enum {
DP_PPDU_STATUS_START,
DP_PPDU_STATUS_DONE,
};
static inline
uint32_t HAL_RX_MON_HW_RX_DESC_SIZE(void)
{
	/* return the HW_RX_DESC size: the full block of RX packet TLVs the
	 * hardware writes at the start of each monitor buffer
	 */
	return sizeof(struct rx_pkt_tlvs);
}
static inline
uint8_t *HAL_RX_MON_DEST_GET_DESC(uint8_t *data)
{
	/* The HW RX descriptor sits at the very start of the monitor
	 * destination buffer, so the buffer pointer is the descriptor.
	 */
	return data;
}
static inline
uint32_t HAL_RX_DESC_GET_MPDU_LENGTH_ERR(void *hw_desc_addr)
{
struct rx_attention *rx_attn;
struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr;
rx_attn = &rx_desc->attn_tlv.rx_attn;
return HAL_RX_GET(rx_attn, RX_ATTENTION_1, MPDU_LENGTH_ERR);
}
static inline
uint32_t HAL_RX_DESC_GET_MPDU_FCS_ERR(void *hw_desc_addr)
{
	/* The MPDU FCS-error bit is reported in the attention TLV */
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr;
	struct rx_attention *attn = &pkt_tlvs->attn_tlv.rx_attn;

	return HAL_RX_GET(attn, RX_ATTENTION_1, FCS_ERR);
}
static inline
uint32_t
HAL_RX_DESC_GET_DECAP_FORMAT(void *hw_desc_addr) {
	/* Decap format (RAW/NWIFI/ETH2/8023) lives in the MSDU-start TLV */
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr;
	struct rx_msdu_start *msdu_start =
		&pkt_tlvs->msdu_start_tlv.rx_msdu_start;

	return HAL_RX_GET(msdu_start, RX_MSDU_START_2, DECAP_FORMAT);
}
static inline
uint8_t *
HAL_RX_DESC_GET_80211_HDR(void *hw_desc_addr) {
	/* The raw 802.11 header is captured in the packet-header TLV */
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr;

	return &pkt_tlvs->pkt_hdr_tlv.rx_pkt_hdr[0];
}
static inline
uint32_t HAL_RX_MON_HW_DESC_GET_PPDUID_GET(void *hw_desc_addr)
{
	/* PHY PPDU ID is reported in the attention TLV */
	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)hw_desc_addr;
	struct rx_attention *attn = &pkt_tlvs->attn_tlv.rx_attn;

	return HAL_RX_GET(attn, RX_ATTENTION_0, PHY_PPDU_ID);
}
#define HAL_RX_BUFFER_ADDR_31_0_GET(buff_addr_info) \
(_HAL_MS((*_OFFSET_TO_WORD_PTR(buff_addr_info, \
BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET)), \
BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK, \
BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB))
#define HAL_RX_REO_ENT_BUFFER_ADDR_39_32_GET(reo_ent_desc) \
(HAL_RX_BUFFER_ADDR_39_32_GET(& \
(((struct reo_entrance_ring *)reo_ent_desc) \
->reo_level_mpdu_frame_info.msdu_link_desc_addr_info)))
#define HAL_RX_REO_ENT_BUFFER_ADDR_31_0_GET(reo_ent_desc) \
(HAL_RX_BUFFER_ADDR_31_0_GET(& \
(((struct reo_entrance_ring *)reo_ent_desc) \
->reo_level_mpdu_frame_info.msdu_link_desc_addr_info)))
#define HAL_RX_REO_ENT_BUF_COOKIE_GET(reo_ent_desc) \
(HAL_RX_BUF_COOKIE_GET(& \
(((struct reo_entrance_ring *)reo_ent_desc) \
->reo_level_mpdu_frame_info.msdu_link_desc_addr_info)))
/**
 * hal_rx_reo_ent_buf_paddr_get: Gets the physical address and
 * cookie from the REO entrance ring element
 *
 * @ rx_desc: Opaque pointer to the REO entrance ring descriptor
 * @ buf_info: structure to return the buffer information
 * @ pp_buf_addr_info: returns a pointer to the descriptor's embedded
 * buffer_addr_info (used later to return the link descriptor to WBM)
 * @ msdu_cnt: pointer to msdu count in MPDU
 * Return: void
 */
static inline
void hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
	struct hal_buf_info *buf_info,
	void **pp_buf_addr_info,
	uint32_t *msdu_cnt
)
{
	struct reo_entrance_ring *reo_ent_ring =
		(struct reo_entrance_ring *)rx_desc;
	struct buffer_addr_info *buf_addr_info;
	struct rx_mpdu_desc_info *rx_mpdu_desc_info_details;
	uint32_t loop_cnt;

	rx_mpdu_desc_info_details =
		&reo_ent_ring->reo_level_mpdu_frame_info.rx_mpdu_desc_info_details;

	/* Number of MSDUs in the MPDU carried by this entrance ring entry */
	*msdu_cnt = HAL_RX_GET(rx_mpdu_desc_info_details,
		RX_MPDU_DESC_INFO_0, MSDU_COUNT);

	/* loop_cnt is only read for the debug trace below */
	loop_cnt = HAL_RX_GET(reo_ent_ring, REO_ENTRANCE_RING_7, LOOPING_COUNT);

	buf_addr_info =
		&reo_ent_ring->reo_level_mpdu_frame_info.msdu_link_desc_addr_info;

	/* Assemble the 40-bit physical address from the 32-bit low word
	 * and the 8-bit high word of the buffer address info
	 */
	buf_info->paddr =
		(HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) |
		((uint64_t)
		(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32));

	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"[%s][%d] ReoAddr=%p, addrInfo=%p, paddr=0x%llx, loopcnt=%d\n",
		__func__, __LINE__, reo_ent_ring, buf_addr_info,
		(unsigned long long)buf_info->paddr, loop_cnt);

	*pp_buf_addr_info = (void *)buf_addr_info;
}
/**
 * hal_rx_mon_next_link_desc_get() - follow the "next" pointer of an
 * MSDU link descriptor
 * @rx_msdu_link_desc: current MSDU link descriptor
 * @buf_info: filled with the 40-bit physical address and SW cookie of
 * the next link descriptor
 * @pp_buf_addr_info: returns a pointer to the embedded buffer_addr_info
 */
static inline
void hal_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
	struct hal_buf_info *buf_info, void **pp_buf_addr_info)
{
	struct rx_msdu_link *link =
		(struct rx_msdu_link *)rx_msdu_link_desc;
	struct buffer_addr_info *addr_info =
		&link->next_msdu_link_desc_addr_info;
	uint64_t paddr_lo = HAL_RX_BUFFER_ADDR_31_0_GET(addr_info);
	uint64_t paddr_hi = HAL_RX_BUFFER_ADDR_39_32_GET(addr_info);

	buf_info->paddr = paddr_lo | (paddr_hi << 32);
	buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(addr_info);
	*pp_buf_addr_info = (void *)addr_info;
}
/**
 * hal_rx_mon_msdu_link_desc_set: Writes an MSDU link descriptor's
 * buffer_addr_info into a WBM Release Ring descriptor, returning the
 * link descriptor to WBM
 *
 * @ soc : HAL version of the SOC pointer
 * @ src_srng_desc : void pointer to the WBM Release Ring descriptor
 * @ buf_addr_info : void pointer to the buffer_addr_info being released
 *
 * Return: void
 */
static inline void hal_rx_mon_msdu_link_desc_set(struct hal_soc *soc,
	void *src_srng_desc, void *buf_addr_info)
{
	struct buffer_addr_info *wbm_srng_buffer_addr_info =
		(struct buffer_addr_info *)src_srng_desc;
	uint64_t paddr;
	struct buffer_addr_info *p_buffer_addr_info =
		(struct buffer_addr_info *)buf_addr_info;

	/* paddr is reassembled only for the debug trace below */
	paddr =
		(HAL_RX_BUFFER_ADDR_31_0_GET(buf_addr_info) |
		((uint64_t)
		(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"[%s][%d] src_srng_desc=%p, buf_addr=0x%llx, cookie=0x%llx\n",
		__func__, __LINE__, src_srng_desc, (unsigned long long)paddr,
		(unsigned long long)p_buffer_addr_info->sw_buffer_cookie);

	/* Structure copy !!! */
	*wbm_srng_buffer_addr_info =
		*((struct buffer_addr_info *)buf_addr_info);
}
/**
 * hal_get_rx_msdu_link_desc_size() - size of an RX MSDU link descriptor
 *
 * Return: size, in bytes, of struct rx_msdu_link
 */
static inline
uint32_t hal_get_rx_msdu_link_desc_size(void)
{
	/* uint32_t (C99 <stdint.h>, provided via qdf_types.h) replaces the
	 * non-standard "uint32" spelling used previously
	 */
	return sizeof(struct rx_msdu_link);
}
enum {
HAL_PKT_TYPE_OFDM = 0,
HAL_CDP_PKT_TYPE_CCK,
HAL_PKT_TYPE_HT,
HAL_PKT_TYPE_VHT,
HAL_PKT_TYPE_HE,
};
enum {
HAL_SGI_0_8_US,
HAL_SGI_0_4_US,
HAL_SGI_1_6_US,
HAL_SGI_3_2_US,
};
enum {
HAL_FULL_RX_BW_20,
HAL_FULL_RX_BW_40,
HAL_FULL_RX_BW_80,
HAL_FULL_RX_BW_160,
};
enum {
HAL_RX_TYPE_SU,
HAL_RX_TYPE_MU_MIMO,
HAL_RX_TYPE_MU_OFDMA,
HAL_RX_TYPE_MU_OFDMA_MIMO,
};
/**
 * HAL_RX_MON_HW_DESC_GET_PPDU_START_STATUS() - populate cdp_mon_status
 * from the PPDU-start fields in the HW RX descriptor
 * @hw_desc_addr: start of the HW RX packet TLVs
 * @rs: CDP monitor RX status structure to fill
 *
 * Reads timestamp, preamble type, RSSI, STBC, SGI, rate MCS, reception
 * type, bandwidth and NSS from the MSDU-start TLV, translating raw HW
 * encodings to CDP enums via the lookup tables below.
 */
static inline
void HAL_RX_MON_HW_DESC_GET_PPDU_START_STATUS(void *hw_desc_addr,
	struct cdp_mon_status *rs)
{
	struct rx_msdu_start *rx_msdu_start;
	struct rx_pkt_tlvs *rx_desc = (struct rx_pkt_tlvs *)hw_desc_addr;
	uint32_t rx_pream_type;
	uint32_t rx_sgi;
	uint32_t rx_type;
	uint32_t rx_bw;

	/* HW encoding -> CDP enum translation tables.
	 * NOTE(review): the raw HW field values index these tables without
	 * a bounds check - assumes HW never reports out-of-range values;
	 * confirm against the MAC HW definition.
	 */
	static uint32_t pkt_type_hw_to_cdp[] = {
		CDP_PKT_TYPE_OFDM,
		CDP_PKT_TYPE_CCK,
		CDP_PKT_TYPE_HT,
		CDP_PKT_TYPE_VHT,
		CDP_PKT_TYPE_HE,
	};

	static uint32_t sgi_hw_to_cdp[] = {
		CDP_SGI_0_8_US,
		CDP_SGI_0_4_US,
		CDP_SGI_1_6_US,
		CDP_SGI_3_2_US,
	};

	static uint32_t rx_type_hw_to_cdp[] = {
		CDP_RX_TYPE_SU,
		CDP_RX_TYPE_MU_MIMO,
		CDP_RX_TYPE_MU_OFDMA,
		CDP_RX_TYPE_MU_OFDMA_MIMO,
	};

	static uint32_t rx_bw_hw_to_cdp[] = {
		CDP_FULL_RX_BW_20,
		CDP_FULL_RX_BW_40,
		CDP_FULL_RX_BW_80,
		CDP_FULL_RX_BW_160,
	};

	rx_msdu_start = &rx_desc->msdu_start_tlv.rx_msdu_start;

	rs->cdp_rs_tstamp.cdp_tsf = rx_msdu_start->ppdu_start_timestamp;

	rx_pream_type = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, PKT_TYPE);
	rs->cdp_rs_pream_type = pkt_type_hw_to_cdp[rx_pream_type];

	rs->cdp_rs_user_rssi = HAL_RX_GET(rx_msdu_start,
		RX_MSDU_START_5, USER_RSSI);

	rs->cdp_rs_stbc = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, STBC);

	rx_sgi = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, SGI);
	rs->cdp_rs_sgi = sgi_hw_to_cdp[rx_sgi];

	/* NOTE: the destination field really is spelled "cdf_rs_rate_mcs"
	 * in struct cdp_mon_status
	 */
	rs->cdf_rs_rate_mcs = HAL_RX_GET(rx_msdu_start,
		RX_MSDU_START_5, RATE_MCS);

	rx_type = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEPTION_TYPE);
	rs->cdp_rs_reception_type = rx_type_hw_to_cdp[rx_type];

	rx_bw = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, RECEIVE_BANDWIDTH);
	rs->cdp_rs_bw = rx_bw_hw_to_cdp[rx_bw];

	rs->cdp_rs_nss = HAL_RX_GET(rx_msdu_start, RX_MSDU_START_5, NSS);
}
/* Per-user receive status (no fields captured yet) */
struct hal_rx_ppdu_user_info {
};

/**
 * struct hal_rx_ppdu_common_info - PPDU-level fields common to all users
 * @ppdu_id: PHY PPDU ID from the RX_PPDU_START TLV
 * @ppdu_timestamp: PPDU start timestamp from the RX_PPDU_START TLV
 */
struct hal_rx_ppdu_common_info {
	uint32_t ppdu_id;
	uint32_t ppdu_timestamp;
};

/**
 * struct hal_rx_ppdu_info - parsed RX PPDU status
 * @com_info: common (per-PPDU) information
 * @user_info: per-user information, one entry per UL MU user
 */
struct hal_rx_ppdu_info {
	struct hal_rx_ppdu_common_info com_info;
	struct hal_rx_ppdu_user_info user_info[HAL_MAX_UL_MU_USERS];
};
static inline uint32_t
hal_get_rx_status_buf_size(void) {
	/* Fixed 2KB RX status buffer until it is made configurable */
	return 2048;
}
static inline uint8_t*
hal_rx_status_get_next_tlv(uint8_t *rx_tlv) {
	uint32_t tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv);
	unsigned long next_addr = (unsigned long)(rx_tlv + tlv_len +
		HAL_RX_TLV32_HDR_SIZE);

	/* TLVs are laid out on 4-byte boundaries; round up */
	return (uint8_t *)((next_addr + 3) & ~((unsigned long)3));
}
/**
 * hal_rx_status_get_tlv_info() - parse one status-ring TLV and
 * accumulate PPDU information
 * @rx_tlv: pointer to the TLV header in the status buffer
 * @ppdu_info: parsed PPDU state, updated in place
 *
 * Return: HAL_TLV_STATUS_PPDU_DONE when the PPDU description is
 * complete, HAL_TLV_STATUS_DUMMY for dummy TLVs, otherwise
 * HAL_TLV_STATUS_PPDU_NOT_DONE
 */
static inline uint32_t
hal_rx_status_get_tlv_info(void *rx_tlv, struct hal_rx_ppdu_info *ppdu_info)
{
	/* user_id is decoded but not consumed yet */
	uint32_t tlv_tag, user_id, tlv_len;

	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);
	user_id = HAL_RX_GET_USER_TLV32_USERID(rx_tlv);
	tlv_len = HAL_RX_GET_USER_TLV32_LEN(rx_tlv);

	/* Step past the 32-bit TLV header to the payload */
	rx_tlv = (uint8_t *) rx_tlv + HAL_RX_TLV32_HDR_SIZE;

	switch (tlv_tag) {

	case WIFIRX_PPDU_START_E:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"[%s][%d] ppdu_start_e len=%d\n",
			__func__, __LINE__, tlv_len);
		ppdu_info->com_info.ppdu_id =
			HAL_RX_GET(rx_tlv, RX_PPDU_START_0,
				PHY_PPDU_ID);
		ppdu_info->com_info.ppdu_timestamp =
			HAL_RX_GET(rx_tlv, RX_PPDU_START_2,
				PPDU_START_TIMESTAMP);
		break;

	/* The following TLVs are recognized but not parsed yet */
	case WIFIRX_PPDU_START_USER_INFO_E:
		break;

	case WIFIRX_PPDU_END_E:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"[%s][%d] ppdu_end_e len=%d\n",
			__func__, __LINE__, tlv_len);
		break;

	case WIFIRXPCU_PPDU_END_INFO_E:
		break;

	case WIFIRX_PPDU_END_USER_STATS_E:
		break;

	case WIFIRX_PPDU_END_USER_STATS_EXT_E:
		break;

	case WIFIRX_PPDU_END_STATUS_DONE_E:
		return HAL_TLV_STATUS_PPDU_DONE;

	case WIFIDUMMY_E:
		return HAL_TLV_STATUS_DUMMY;

	/* NOTE(review): a zero tag is treated as end-of-PPDU -
	 * presumably an unwritten/cleared buffer word; confirm
	 * against HW behavior
	 */
	case 0:
		return HAL_TLV_STATUS_PPDU_DONE;

	default:
		break;
	}

	return HAL_TLV_STATUS_PPDU_NOT_DONE;
}
/**
 * hal_get_rx_status_done_tlv_size() - size of the status-done TLV header
 * @hal_soc: HAL SOC handle (currently unused)
 *
 * Return: size in bytes of the 32-bit TLV header
 */
static inline
uint32_t hal_get_rx_status_done_tlv_size(void *hal_soc)
{
	return HAL_RX_TLV32_HDR_SIZE;
}
/**
 * hal_get_rx_status_done() - check whether HW has finished writing a
 * status buffer
 * @rx_tlv: pointer to the first TLV in the status buffer
 *
 * Return: QDF_STATUS_SUCCESS if the buffer carries the
 * RX_STATUS_BUFFER_DONE tag, QDF_STATUS_E_EMPTY otherwise
 */
static inline QDF_STATUS
hal_get_rx_status_done(uint8_t *rx_tlv)
{
	return (HAL_RX_GET_USER_TLV32_TYPE(rx_tlv) ==
		WIFIRX_STATUS_BUFFER_DONE_E) ?
		QDF_STATUS_SUCCESS : QDF_STATUS_E_EMPTY;
}
/**
 * hal_clear_rx_status_done() - clear the first TLV header word of a
 * status buffer so a stale BUFFER_DONE tag is not re-read on reuse
 * @rx_tlv: pointer to the first TLV in the status buffer
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static inline QDF_STATUS
hal_clear_rx_status_done(uint8_t *rx_tlv)
{
	*(uint32_t *)rx_tlv = 0;
	return QDF_STATUS_SUCCESS;
}
#endif

View File

@@ -58,6 +58,10 @@
#include "rx_msdu_start.h"
#include "rx_msdu_end.h"
#include "rx_attention.h"
#include "rx_ppdu_start.h"
#include "rx_ppdu_start_user_info.h"
#include "rx_ppdu_end_user_stats.h"
#include "rx_ppdu_end_user_stats_ext.h"
#include "tx_msdu_extension.h"
#include "wcss_version.h"
#include "pld_common.h"
@@ -139,7 +143,8 @@ enum hal_srng_ring_id {
HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF = 132,
HAL_SRNG_WMAC1_RXDMA2SW0 = 133,
HAL_SRNG_WMAC1_RXDMA2SW1 = 134,
/* 135-142 unused */
HAL_SRNG_WMAC1_SW2RXDMA1_DESC = 135,
/* 136-142 unused */
HAL_SRNG_LMAC1_ID_END = 143
};

View File

@@ -485,6 +485,18 @@ static struct hal_hw_srng_config hw_srng_table[] = {
.reg_start = {},
.reg_size = {},
},
{ /* RXDMA_MONITOR_DESC */
.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_DESC,
.max_rings = 1,
.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
.lmac_ring = TRUE,
.ring_dir = HAL_SRNG_SRC_RING,
/* reg_start is not set because LMAC rings are not accessed
* from host
*/
.reg_start = {},
.reg_size = {},
},
};
/**

View File

@@ -362,13 +362,11 @@ qdf_nbuf_unmap_nbytes(qdf_device_t osdev,
__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
}
#ifndef REMOVE_INIT_DEBUG_CODE
static inline void
qdf_nbuf_sync_for_cpu(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
__qdf_nbuf_sync_for_cpu(osdev, buf, dir);
}
#endif
static inline QDF_STATUS
qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)

View File

@@ -523,10 +523,10 @@ QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
qdf_dma_dir_t dir, int nbytes);
#ifndef REMOVE_INIT_DEBUG_CODE
void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
qdf_dma_dir_t dir);
#endif
QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
void __qdf_nbuf_unmap_nbytes_single(

View File

@@ -2436,7 +2436,6 @@ __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
EXPORT_SYMBOL(__qdf_nbuf_dmamap_set_cb);
#ifndef REMOVE_INIT_DEBUG_CODE
/**
* __qdf_nbuf_sync_single_for_cpu() - nbuf sync
* @osdev: os device
@@ -2485,7 +2484,6 @@ __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
}
EXPORT_SYMBOL(__qdf_nbuf_sync_for_cpu);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
/**