Merge "qcacmn: Set DBS scan if ndp peers are active"

This commit is contained in:
Linux Build Service Account
2020-02-13 04:38:15 -08:00
committed by Gerrit - the friendly Code Review server
23 changed files with 2289 additions and 482 deletions

View File

@@ -79,6 +79,7 @@
#define CDP_BUNDLE_STATS 23
#define CDP_CREDIT_STATS 24
#define CDP_DISCONNECT_STATS 25
#define CDP_DP_RX_FISA_STATS 26
#define WME_AC_TO_TID(_ac) ( \
((_ac) == WME_AC_VO) ? 6 : \
@@ -284,6 +285,7 @@ enum cdp_host_txrx_stats {
TXRX_PDEV_CFG_PARAMS = 10,
TXRX_NAPI_STATS = 11,
TXRX_SOC_INTERRUPT_STATS = 12,
TXRX_SOC_FSE_STATS = 13,
TXRX_HOST_STATS_MAX,
};
@@ -729,6 +731,11 @@ typedef bool (*ol_txrx_tx_flow_control_is_pause_fp)(void *osif_dev);
*/
typedef QDF_STATUS(*ol_txrx_rx_fp)(void *osif_dev, qdf_nbuf_t msdu_list);
typedef QDF_STATUS(*ol_txrx_fisa_rx_fp)(void *soc,
void *dp_vdev,
qdf_nbuf_t msdu_list);
typedef QDF_STATUS(*ol_txrx_fisa_flush_fp)(void *soc, int ring_num);
/**
* ol_txrx_rx_flush_fp - receive function to hand batches of data
* frames from txrx to OS shim
@@ -905,6 +912,8 @@ struct ol_txrx_ops {
ol_txrx_rx_mon_fp mon;
ol_txrx_stats_rx_fp stats_rx;
ol_txrx_rsim_rx_decap_fp rsim_rx_decap;
ol_txrx_fisa_rx_fp osif_fisa_rx;
ol_txrx_fisa_flush_fp osif_fisa_flush;
} rx;
/* proxy arp function pointer - specified by OS shim, stored by txrx */
ol_txrx_proxy_arp_fp proxy_arp;
@@ -2234,6 +2243,14 @@ struct cdp_peer_cookie {
uint8_t cookie;
};
#ifdef WLAN_SUPPORT_RX_FISA
struct cdp_flow_stats {
uint32_t aggr_count;
uint32_t curr_aggr_count;
uint32_t flush_count;
uint32_t bytes_aggregated;
};
#else
/**
* cdp_flow_stats - Per-Flow (5-tuple) statistics
* @msdu_count: number of rx msdus matching this flow
@@ -2244,6 +2261,7 @@ struct cdp_peer_cookie {
struct cdp_flow_stats {
uint32_t msdu_count;
};
#endif
/**
* cdp_flow_fst_operation - RX FST operations allowed
@@ -2278,6 +2296,9 @@ enum cdp_flow_protocol_type {
* @l4_protocol: protocol type in flow (TCP/UDP)
*/
struct cdp_rx_flow_tuple_info {
#ifdef WLAN_SUPPORT_RX_FISA
uint8_t tuple_populated;
#endif
uint32_t dest_ip_127_96;
uint32_t dest_ip_95_64;
uint32_t dest_ip_63_32;

View File

@@ -4679,15 +4679,15 @@ dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
qdf_nbuf_data(msg),
qdf_nbuf_len(msg),
soc->htc_endpoint,
1); /* tag - not relevant here */
HTC_TX_PACKET_TAG_RUNTIME_PUT);
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
htt_logger_bufp);
qdf_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
fse_setup_info->pdev_id);
dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
fse_setup_info->pdev_id);
QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
(void *)fse_setup_info->hash_key,
fse_setup_info->hash_key_len);
@@ -4731,6 +4731,7 @@ dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
if (!qdf_nbuf_put_tail(msg,
sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
qdf_nbuf_free(msg);
return QDF_STATUS_E_FAILURE;
}
@@ -4822,15 +4823,121 @@ dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
qdf_nbuf_data(msg),
qdf_nbuf_len(msg),
soc->htc_endpoint,
1); /* tag - not relevant here */
HTC_TX_PACKET_TAG_RUNTIME_PUT);
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
htt_logger_bufp);
qdf_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
fse_op_info->pdev_id);
dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
fse_op_info->pdev_id);
return QDF_STATUS_SUCCESS;
}
/**
* dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
* @pdev: DP pdev handle
* @fse_op_info: Flow entry parameters
*
* Return: Success when HTT message is sent, error on failure
*/
QDF_STATUS
dp_htt_rx_fisa_config(struct dp_pdev *pdev,
struct dp_htt_rx_fisa_cfg *fisa_config)
{
struct htt_soc *soc = pdev->soc->htt_handle;
struct dp_htt_htc_pkt *pkt;
qdf_nbuf_t msg;
u_int32_t *msg_word;
struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
uint8_t *htt_logger_bufp;
uint32_t len;
len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));
msg = qdf_nbuf_alloc(soc->osdev,
len,
/* reserve room for the HTC header */
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
4,
TRUE);
if (!msg)
return QDF_STATUS_E_NOMEM;
/*
* Set the length of the message.
* The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
* separately during the below call to qdf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
if (!qdf_nbuf_put_tail(msg,
sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
qdf_nbuf_free(msg);
return QDF_STATUS_E_FAILURE;
}
/* fill in the message contents */
msg_word = (u_int32_t *)qdf_nbuf_data(msg);
memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
/* rewind beyond alignment pad to get to the HTC header reserved area */
qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
htt_logger_bufp = (uint8_t *)msg_word;
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);
htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;
HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);
msg_word++;
HTT_RX_FISA_CONFIG_FISA_ENABLE_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_IPSEC_SKIP_SEARCH_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_NON_TCP_SKIP_SEARCH_SET(*msg_word, 0);
HTT_RX_FISA_CONFIG_ADD_IPV4_FIXED_HDR_LEN_SET(*msg_word, 0);
HTT_RX_FISA_CONFIG_ADD_IPV6_FIXED_HDR_LEN_SET(*msg_word, 0);
HTT_RX_FISA_CONFIG_ADD_TCP_FIXED_HDR_LEN_SET(*msg_word, 0);
HTT_RX_FISA_CONFIG_ADD_UDP_HDR_LEN_SET(*msg_word, 0);
HTT_RX_FISA_CONFIG_CHKSUM_CUM_IP_LEN_EN_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_DISABLE_TID_CHECK_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_DISABLE_TA_CHECK_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_DISABLE_QOS_CHECK_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_DISABLE_RAW_CHECK_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_DISABLE_DECRYPT_ERR_CHECK_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_DISABLE_MSDU_DROP_CHECK_SET(*msg_word, 1);
HTT_RX_FISA_CONFIG_FISA_AGGR_LIMIT_SET(*msg_word, 0xf);
msg_word++;
htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;
pkt = htt_htc_pkt_alloc(soc);
if (!pkt) {
qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
qdf_assert(0);
qdf_nbuf_free(msg);
return QDF_STATUS_E_RESOURCES; /* failure */
}
pkt->soc_ctxt = NULL; /* not used during send-done callback */
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
dp_htt_h2t_send_complete_free_netbuf,
qdf_nbuf_data(msg),
qdf_nbuf_len(msg),
soc->htc_endpoint,
HTC_TX_PACKET_TAG_RUNTIME_PUT);
SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
htt_logger_bufp);
dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
fisa_config->pdev_id);
return QDF_STATUS_SUCCESS;
}

View File

@@ -289,6 +289,19 @@ struct dp_htt_rx_flow_fst_operation {
struct cdp_rx_flow_info *rx_flow;
};
/**
 * struct dp_htt_rx_fisa_cfg - Rx FISA config
 * @pdev_id: DP Pdev identifier
 * @fisa_timeout: fisa aggregation timeout
 */
struct dp_htt_rx_fisa_cfg {
	uint8_t pdev_id;
	uint32_t fisa_timeout;
};
QDF_STATUS dp_htt_rx_fisa_config(struct dp_pdev *pdev,
struct dp_htt_rx_fisa_cfg *fisa_config);
/*
* htt_soc_initialize() - SOC level HTT initialization
* @htt_soc: Opaque htt SOC handle

View File

@@ -1875,7 +1875,7 @@ struct dp_soc *cdp_soc_t_to_dp_soc(struct cdp_soc_t *psoc)
return (struct dp_soc *)psoc;
}
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
/**
* dp_rx_flow_update_fse_stats() - Update a flow's statistics
* @pdev: pdev handle
@@ -1934,7 +1934,8 @@ void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev);
*/
QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
struct dp_pdev *pdev);
#else
#else /* !((WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)) */
/**
* dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
* @soc: SoC handle
@@ -1959,7 +1960,7 @@ static inline
void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#endif
/**
* dp_get_vdev_from_soc_vdev_id_wifi3() - Returns vdev object given the vdev id
@@ -2030,4 +2031,7 @@ void dp_is_hw_dbs_enable(struct dp_soc *soc,
int *max_mac_rings);
#if defined(WLAN_SUPPORT_RX_FISA)
void dp_rx_dump_fisa_table(struct dp_soc *soc);
#endif /* WLAN_SUPPORT_RX_FISA */
#endif /* #ifndef _DP_INTERNAL_H_ */

View File

@@ -46,6 +46,9 @@
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "dp_htt.h"
#ifdef WLAN_SUPPORT_RX_FISA
#include <dp_fisa_rx.h>
#endif
#include "htt_ppdu_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
@@ -314,6 +317,7 @@ const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
};
/* MCL specific functions */
@@ -4916,20 +4920,57 @@ dp_rx_target_fst_config(struct dp_soc *soc)
}
return status;
}
#else
#elif defined(WLAN_SUPPORT_RX_FISA)
/**
* dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
* @soc: SoC handle
*
* Return: Success
*/
static inline QDF_STATUS
dp_rx_target_fst_config(struct dp_soc *soc)
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
/* Check if it is enabled in the INI */
if (!soc->fisa_enable) {
dp_err("RX FISA feature is disabled");
return QDF_STATUS_E_NOSUPPORT;
}
return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
}
#define FISA_MAX_TIMEOUT 0xffffffff
#define FISA_DISABLE_TIMEOUT 0
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
struct dp_htt_rx_fisa_cfg fisa_config;
fisa_config.pdev_id = 0;
fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
}
#else /* !WLAN_SUPPORT_RX_FISA */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_SUPPORT_RX_FISA */
#ifndef WLAN_SUPPORT_RX_FISA
/* Stub: FISA support compiled out, nothing to configure in FW */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_SUPPORT_RX_FISA */
/* Stub used when FISA stats are not compiled in */
static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
/* Stub used when the FISA flow table is not compiled in */
static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
#endif /* !WLAN_SUPPORT_RX_FISA */
/*
* dp_soc_attach_target_wifi3() - SOC initialization in the target
@@ -4958,11 +4999,20 @@ dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
}
status = dp_rx_target_fst_config(soc);
if (status != QDF_STATUS_SUCCESS) {
if (status != QDF_STATUS_SUCCESS &&
status != QDF_STATUS_E_NOSUPPORT) {
dp_err("Failed to send htt fst setup config message to target");
return status;
}
if (status == QDF_STATUS_SUCCESS) {
status = dp_rx_fisa_config(soc);
if (status != QDF_STATUS_SUCCESS) {
dp_err("Failed to send htt FISA config message to target");
return status;
}
}
DP_STATS_INIT(soc);
/* initialize work queue for stats processing */
@@ -5122,6 +5172,8 @@ static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
vdev->osif_get_key = txrx_ops->get_key;
vdev->osif_rx_mon = txrx_ops->rx.mon;
vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
@@ -7462,6 +7514,7 @@ static void dp_txrx_stats_help(void)
dp_info(" 28 -- Host REO Queue Statistics");
dp_info(" 29 -- Host Soc cfg param Statistics");
dp_info(" 30 -- Host pdev cfg param Statistics");
dp_info(" 31 -- Host FISA stats");
}
/**
@@ -7524,6 +7577,8 @@ dp_print_host_stats(struct dp_vdev *vdev,
case TXRX_SOC_INTERRUPT_STATS:
dp_print_soc_interrupt_stats(pdev->soc);
break;
case TXRX_SOC_FSE_STATS:
dp_rx_dump_fisa_table(pdev->soc);
default:
dp_info("Wrong Input For TxRx Host Stats");
dp_txrx_stats_help();
@@ -8974,6 +9029,10 @@ static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
/* TODO: NOT IMPLEMENTED */
break;
case CDP_DP_RX_FISA_STATS:
dp_rx_dump_fisa_stats(soc);
break;
default:
status = QDF_STATUS_E_INVAL;
break;

View File

@@ -1348,7 +1348,13 @@ void dp_rx_deliver_to_stack(struct dp_soc *soc,
vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
&nbuf_tail, peer->mac_addr.raw);
}
vdev->osif_rx(vdev->osif_vdev, nbuf_head);
/* Function pointer initialized only when FISA is enabled */
if (vdev->osif_fisa_rx)
/* on failure send it via regular path */
vdev->osif_fisa_rx(soc, vdev, nbuf_head);
else
vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}
/**
@@ -1739,6 +1745,28 @@ uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
return num_pending;
}
#ifdef WLAN_SUPPORT_RX_FISA
/**
 * dp_rx_skip_tlvs() - Pull past RX TLV headers unless FISA consumes them
 * @vdev: DP vdev context
 * @nbuf: nbuf whose data pointer is adjusted
 * @size: number of bytes to pull
 *
 * When the FISA rx callback is registered the TLVs still carry FISA
 * info, so they are left in place; otherwise skip past them as usual.
 *
 * Return: None
 */
static void dp_rx_skip_tlvs(struct dp_vdev *vdev, qdf_nbuf_t nbuf, int size)
{
	if (vdev->osif_fisa_rx)
		return;

	qdf_nbuf_pull_head(nbuf, size);
}
#else /* !WLAN_SUPPORT_RX_FISA */
static void dp_rx_skip_tlvs(struct dp_vdev *vdev, qdf_nbuf_t nbuf, int size)
{
	qdf_nbuf_pull_head(nbuf, size);
}
#endif /* !WLAN_SUPPORT_RX_FISA */
/**
* dp_rx_process() - Brain of the Rx processing functionality
* Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
@@ -2185,9 +2213,8 @@ done:
RX_PKT_TLVS_LEN;
qdf_nbuf_set_pktlen(nbuf, pkt_len);
qdf_nbuf_pull_head(nbuf,
RX_PKT_TLVS_LEN +
msdu_metadata.l3_hdr_pad);
dp_rx_skip_tlvs(vdev, nbuf, RX_PKT_TLVS_LEN +
msdu_metadata.l3_hdr_pad);
}
/*
@@ -2355,6 +2382,9 @@ done:
}
}
if (vdev && vdev->osif_fisa_flush)
vdev->osif_fisa_flush(soc, reo_ring_num);
if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
vdev->osif_gro_flush(vdev->osif_vdev,
reo_ring_num);

View File

@@ -1245,13 +1245,17 @@ struct dp_soc {
* invalidation bug is enabled or not
*/
bool is_rx_fse_full_cache_invalidate_war_enabled;
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
/**
* Pointer to DP RX Flow FST at SOC level if
* is_rx_flow_search_table_per_pdev is false
* TBD: rx_fst[num_macs] if we decide to have per mac FST
*/
struct dp_rx_fst *rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef WLAN_SUPPORT_RX_FISA
uint8_t fisa_enable;
#endif
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
};
#ifdef IPA_OFFLOAD
@@ -1809,6 +1813,10 @@ struct dp_vdev {
ol_txrx_rx_fp osif_rx;
/* callback to deliver rx frames to the OS */
ol_txrx_rx_fp osif_rx_stack;
/* Callback to handle rx fisa frames */
ol_txrx_fisa_rx_fp osif_fisa_rx;
ol_txrx_fisa_flush_fp osif_fisa_flush;
/* call back function to flush out queued rx packets*/
ol_txrx_rx_flush_fp osif_rx_flush;
ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
@@ -2193,9 +2201,10 @@ struct dp_tx_me_buf_t {
uint8_t data[QDF_MAC_ADDR_SIZE];
};
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
struct hal_rx_fst;
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
struct dp_rx_fse {
/* HAL Rx Flow Search Entry which matches HW definition */
void *hal_rx_fse;
@@ -2206,9 +2215,9 @@ struct dp_rx_fse {
/* Stats tracking for this flow */
struct cdp_flow_stats stats;
/* Flag indicating whether flow is IPv4 address tuple */
bool is_ipv4_addr_entry;
uint8_t is_ipv4_addr_entry;
/* Flag indicating whether flow is valid */
bool is_valid;
uint8_t is_valid;
};
struct dp_rx_fst {
@@ -2236,6 +2245,78 @@ struct dp_rx_fst {
/* Flag to indicate completion of FSE setup in HW/FW */
bool fse_setup_done;
};
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)
/* use defined() like the other guards in this file (see the
 * WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA checks above); a bare
 * #elif MACRO breaks if the macro is defined with an empty value
 */
#elif defined(WLAN_SUPPORT_RX_FISA)

/**
 * enum fisa_aggr_ret - Result of attempting FISA aggregation on an msdu
 * @FISA_AGGR_DONE: aggregation performed
 * @FISA_AGGR_NOT_ELIGIBLE: msdu not eligible for aggregation
 * @FISA_FLUSH_FLOW: flow needs to be flushed
 */
enum fisa_aggr_ret {
	FISA_AGGR_DONE,
	FISA_AGGR_NOT_ELIGIBLE,
	FISA_FLUSH_FLOW
};
/* Software flow-table entry holding per-flow FISA aggregation state */
struct dp_fisa_rx_sw_ft {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hw_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
	/* Flag: tuple info filled in for this entry (per name; confirm) */
	uint8_t is_populated;
	/* L4 protocol classification flags for the flow */
	uint8_t is_flow_udp;
	uint8_t is_flow_tcp;
	/* First nbuf of the aggregate being built (per name; confirm) */
	qdf_nbuf_t head_skb;
	/* Running checksum/length state used while aggregating */
	uint16_t cumulative_l4_checksum;
	uint16_t adjusted_cumulative_ip_length;
	/* Count of msdus in the current aggregate (per name; confirm) */
	uint16_t cur_aggr;
	/* Snapshots of the cumulative values at NAPI flush time */
	uint16_t napi_flush_cumulative_l4_checksum;
	uint16_t napi_flush_cumulative_ip_length;
	/* Last nbuf appended to the aggregate (per name; confirm) */
	qdf_nbuf_t last_skb;
	/* Offsets of IP and L4 headers within the head skb */
	uint32_t head_skb_ip_hdr_offset;
	uint32_t head_skb_l4_hdr_offset;
	/* 5-tuple this entry was programmed with */
	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
	/* NAPI instance associated with this flow (per name; confirm) */
	uint8_t napi_id;
	/* vdev the flow belongs to */
	struct dp_vdev *vdev;
	/* Lifetime counters for this entry */
	uint64_t bytes_aggregated;
	uint32_t flush_count;
	uint32_t aggr_count;
	/* When set, aggregation is skipped for this flow (per name) */
	uint8_t do_not_aggregate;
	/* Cumulative IP length as reported by HAL TLVs (note: field name
	 * has a typo, "cumultive")
	 */
	uint16_t hal_cumultive_ip_len;
	/* Back-pointer to the owning SoC */
	struct dp_soc *soc_hdl;
};
#define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_fisa_rx_sw_ft)
/* DP-level Rx Flow Search Table (FISA variant) */
struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Lock for adding/deleting entries of FST */
	qdf_spinlock_t dp_rx_fst_lock;
	/* Counters: flows added, flows deleted, hash collisions seen */
	uint32_t add_flow_count;
	uint32_t del_flow_count;
	uint32_t hash_collision_cnt;
	/* Back-pointer to the owning SoC */
	struct dp_soc *soc_hdl;
};
#endif /* WLAN_SUPPORT_RX_FISA */
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */
#endif /* _DP_TYPES_H_ */

View File

@@ -464,6 +464,12 @@ struct hal_hw_txrx_ops {
void (*hal_rx_get_rtt_info)(void *rx_tlv, void *ppdu_info_handle);
void (*hal_rx_msdu_packet_metadata_get)(uint8_t *buf,
void *msdu_pkt_metadata);
uint16_t (*hal_rx_get_fisa_cumulative_l4_checksum)(uint8_t *buf);
uint16_t (*hal_rx_get_fisa_cumulative_ip_length)(uint8_t *buf);
bool (*hal_rx_get_udp_proto)(uint8_t *buf);
bool (*hal_rx_get_fisa_flow_agg_continuation)(uint8_t *buf);
uint8_t (*hal_rx_get_fisa_flow_agg_count)(uint8_t *buf);
bool (*hal_rx_get_fisa_timeout)(uint8_t *buf);
};
/**

View File

@@ -995,6 +995,14 @@ hal_rx_mpdu_peer_meta_data_set(uint8_t *buf, uint32_t peer_mdata)
RX_MSDU_START_2_TCP_PROTO_MASK, \
RX_MSDU_START_2_TCP_PROTO_LSB))
#define HAL_RX_TLV_GET_UDP_PROTO(buf) \
(_HAL_MS( \
(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
msdu_start_tlv.rx_msdu_start), \
RX_MSDU_START_2_UDP_PROTO_OFFSET)), \
RX_MSDU_START_2_UDP_PROTO_MASK, \
RX_MSDU_START_2_UDP_PROTO_LSB))
#define HAL_RX_TLV_GET_IPV6(buf) \
(_HAL_MS( \
(*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
@@ -3461,4 +3469,151 @@ hal_rx_msdu_metadata_get(hal_soc_handle_t hal_soc_hdl, uint8_t *buf,
return hal_soc->ops->hal_rx_msdu_packet_metadata_get(buf, msdu_md);
}
/**
 * hal_rx_get_fisa_cumulative_l4_checksum() - Get cumulative_l4_checksum
 * from rx_msdu_end TLV
 * @hal_soc_hdl: HAL SoC opaque handle
 * @buf: pointer to the start of RX PKT TLV headers
 *
 * Return: cumulative_l4_checksum, or 0 when the op is not implemented
 */
static inline uint16_t
hal_rx_get_fisa_cumulative_l4_checksum(hal_soc_handle_t hal_soc_hdl,
				       uint8_t *buf)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	/* defensive: a NULL handle indicates a programming error */
	if (!hal_soc || !hal_soc->ops) {
		hal_err("hal handle is NULL");
		QDF_BUG(0);
		return 0;
	}

	/* positive-check pattern, consistent with the sibling accessors */
	if (hal_soc->ops->hal_rx_get_fisa_cumulative_l4_checksum)
		return hal_soc->ops->hal_rx_get_fisa_cumulative_l4_checksum(buf);

	return 0;
}
/**
 * hal_rx_get_fisa_cumulative_ip_length: API to get cumulative_ip_length
 * from rx_msdu_end TLV
 * @hal_soc_hdl: HAL SoC opaque handle
 * @buf: pointer to the start of RX PKT TLV headers
 *
 * Return: cumulative_ip_length, or 0 when the op is not implemented
 */
static inline uint16_t
hal_rx_get_fisa_cumulative_ip_length(hal_soc_handle_t hal_soc_hdl,
				     uint8_t *buf)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	/* defensive: a NULL handle indicates a programming error */
	if (!hal_soc || !hal_soc->ops) {
		hal_err("hal handle is NULL");
		QDF_BUG(0);
		return 0;
	}

	if (hal_soc->ops->hal_rx_get_fisa_cumulative_ip_length)
		return hal_soc->ops->hal_rx_get_fisa_cumulative_ip_length(buf);

	return 0;
}
/**
 * hal_rx_get_udp_proto() - Read the UDP proto field from the
 * rx_msdu_start TLV
 * @hal_soc_hdl: HAL SoC opaque handle
 * @buf: pointer to the start of RX PKT TLV headers
 *
 * Return: UDP proto field value (0 when the op is not implemented)
 */
static inline bool
hal_rx_get_udp_proto(hal_soc_handle_t hal_soc_hdl, uint8_t *buf)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (!hal_soc || !hal_soc->ops) {
		hal_err("hal handle is NULL");
		QDF_BUG(0);
		return 0;
	}

	return hal_soc->ops->hal_rx_get_udp_proto ?
		hal_soc->ops->hal_rx_get_udp_proto(buf) : 0;
}
/**
 * hal_rx_get_fisa_flow_agg_continuation: API to get fisa flow_agg_continuation
 * from rx_msdu_end TLV
 * @hal_soc_hdl: HAL SoC opaque handle
 * @buf: pointer to the start of RX PKT TLV headers
 *
 * Return: flow_agg_continuation bit field value, 0 if op not implemented
 */
static inline bool
hal_rx_get_fisa_flow_agg_continuation(hal_soc_handle_t hal_soc_hdl,
				      uint8_t *buf)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	/* defensive: a NULL handle indicates a programming error */
	if (!hal_soc || !hal_soc->ops) {
		hal_err("hal handle is NULL");
		QDF_BUG(0);
		return 0;
	}

	if (hal_soc->ops->hal_rx_get_fisa_flow_agg_continuation)
		return hal_soc->ops->hal_rx_get_fisa_flow_agg_continuation(buf);

	return 0;
}
/**
 * hal_rx_get_fisa_flow_agg_count() - Read the fisa flow_agg count from the
 * rx_msdu_end TLV
 * @hal_soc_hdl: HAL SoC opaque handle
 * @buf: pointer to the start of RX PKT TLV headers
 *
 * Return: flow_agg count value (0 when the op is not implemented)
 */
static inline uint8_t
hal_rx_get_fisa_flow_agg_count(hal_soc_handle_t hal_soc_hdl,
			       uint8_t *buf)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (!hal_soc || !hal_soc->ops) {
		hal_err("hal handle is NULL");
		QDF_BUG(0);
		return 0;
	}

	return hal_soc->ops->hal_rx_get_fisa_flow_agg_count ?
		hal_soc->ops->hal_rx_get_fisa_flow_agg_count(buf) : 0;
}
/**
 * hal_rx_get_fisa_timeout: API to get fisa time out from rx_msdu_end TLV
 * @hal_soc_hdl: HAL SoC opaque handle
 * @buf: pointer to the start of RX PKT TLV headers
 *
 * Return: fisa flow_agg timeout bit value, 0 if op not implemented
 */
static inline bool
hal_rx_get_fisa_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t *buf)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	/* defensive: a NULL handle indicates a programming error */
	if (!hal_soc || !hal_soc->ops) {
		hal_err("hal handle is NULL");
		QDF_BUG(0);
		return 0;
	}

	if (hal_soc->ops->hal_rx_get_fisa_timeout)
		return hal_soc->ops->hal_rx_get_fisa_timeout(buf);

	return 0;
}
#endif /* _HAL_RX_H */

754
hal/wifi3.0/hal_rx_flow.c Normal file
View File

@@ -0,0 +1,754 @@
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "qdf_module.h"
#include "dp_types.h"
#include "hal_rx_flow.h"
#if defined(WLAN_SUPPORT_RX_FISA)
/**
 * hal_rx_dump_fse_table() - Dump every valid entry of the Rx FST
 * @fst: Pointer to the Rx Flow Search Table
 *
 * Walks the FST backing memory and logs each entry whose valid bit is
 * set. Debug helper; log-only, no side effects on the table.
 *
 * Return: None
 */
void hal_rx_dump_fse_table(struct hal_rx_fst *fst)
{
	int i = 0;
	struct rx_flow_search_entry *fse =
		(struct rx_flow_search_entry *)fst->base_vaddr;

	dp_info("Number flow table entries %d", fst->add_flow_count);
	for (i = 0; i < fst->max_entries; i++) {
		if (fse[i].valid) {
			/* field-name typos fixed: "src_ip_95_640" and the
			 * missing space in "aggregation_count0x%x"
			 */
			dp_info("index %d:"
				" src_ip_127_96 0x%x"
				" src_ip_95_64 0x%x"
				" src_ip_63_32 0x%x"
				" src_ip_31_0 0x%x"
				" dest_ip_127_96 0x%x"
				" dest_ip_95_64 0x%x"
				" dest_ip_63_32 0x%x"
				" dest_ip_31_0 0x%x"
				" src_port 0x%x"
				" dest_port 0x%x"
				" l4_protocol 0x%x"
				" valid 0x%x"
				" reo_destination_indication 0x%x"
				" msdu_drop 0x%x"
				" reo_destination_handler 0x%x"
				" metadata 0x%x"
				" aggregation_count 0x%x"
				" lro_eligible 0x%x"
				" msdu_count 0x%x"
				" msdu_byte_count 0x%x"
				" timestamp 0x%x"
				" cumulative_l4_checksum 0x%x"
				" cumulative_ip_length 0x%x"
				" tcp_sequence_number 0x%x",
				i,
				fse[i].src_ip_127_96,
				fse[i].src_ip_95_64,
				fse[i].src_ip_63_32,
				fse[i].src_ip_31_0,
				fse[i].dest_ip_127_96,
				fse[i].dest_ip_95_64,
				fse[i].dest_ip_63_32,
				fse[i].dest_ip_31_0,
				fse[i].src_port,
				fse[i].dest_port,
				fse[i].l4_protocol,
				fse[i].valid,
				fse[i].reo_destination_indication,
				fse[i].msdu_drop,
				fse[i].reo_destination_handler,
				fse[i].metadata,
				fse[i].aggregation_count,
				fse[i].lro_eligible,
				fse[i].msdu_count,
				fse[i].msdu_byte_count,
				fse[i].timestamp,
				fse[i].cumulative_l4_checksum,
				fse[i].cumulative_ip_length,
				fse[i].tcp_sequence_number);
		}
	}
}
#else
/* Stub when FISA is compiled out */
void hal_rx_dump_fse_table(struct hal_rx_fst *fst)
{
}
#endif
/**
 * hal_rx_flow_setup_fse() - Setup a flow search entry in HW FST
 * @fst: Pointer to the Rx Flow Search Table
 * @table_offset: offset into the table where the flow is to be setup
 * @flow: Flow Parameters
 *
 * Tuple words are converted with qdf_htonl before being programmed.
 *
 * Return: pointer to the programmed entry, or NULL when the offset is out
 *	   of range or the entry is already valid
 */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
void *
hal_rx_flow_setup_fse(struct hal_rx_fst *fst, uint32_t table_offset,
		      struct hal_rx_flow *flow)
{
	uint8_t *fse;
	bool fse_valid;

	/* bounds-check the caller-supplied slot index */
	if (table_offset >= fst->max_entries) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "HAL FSE table offset %u exceeds max entries %u",
			  table_offset, fst->max_entries);
		return NULL;
	}

	fse = (uint8_t *)fst->base_vaddr +
		(table_offset * HAL_RX_FST_ENTRY_SIZE);

	/* never overwrite an entry that is still marked valid */
	fse_valid = HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);

	if (fse_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "HAL FSE %pK already valid", fse);
		return NULL;
	}

	/* 128-bit source IP (words 0-3), converted to network order */
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96,
			       qdf_htonl(flow->tuple_info.src_ip_127_96));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64,
			       qdf_htonl(flow->tuple_info.src_ip_95_64));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32,
			       qdf_htonl(flow->tuple_info.src_ip_63_32));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0,
			       qdf_htonl(flow->tuple_info.src_ip_31_0));

	/* 128-bit destination IP (words 4-7), converted to network order */
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96,
			       qdf_htonl(flow->tuple_info.dest_ip_127_96));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64,
			       qdf_htonl(flow->tuple_info.dest_ip_95_64));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32,
			       qdf_htonl(flow->tuple_info.dest_ip_63_32));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0,
			       qdf_htonl(flow->tuple_info.dest_ip_31_0));

	/* ports and protocol are sub-word fields: clear then OR in */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, DEST_PORT,
			       (flow->tuple_info.dest_port));

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, SRC_PORT,
			       (flow->tuple_info.src_port));

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL,
			       flow->tuple_info.l4_protocol);

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER,
			       flow->reo_destination_handler);

	/* set the valid bit */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, VALID, 1);

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_10, METADATA,
			       flow->fse_metadata);

	/* REO destination indication lives in word 11 for this layout */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, REO_DESTINATION_INDICATION);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, REO_DESTINATION_INDICATION) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_11,
			       REO_DESTINATION_INDICATION,
			       flow->reo_destination_indication);

	/* Reset all the other fields in FSE */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, RESERVED_9);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_DROP);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, RESERVED_11);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_COUNT);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_12, MSDU_BYTE_COUNT);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_13, TIMESTAMP);

	return fse;
}
#elif defined(WLAN_SUPPORT_RX_FISA)
/**
 * hal_rx_flow_setup_fse() - Setup a flow search entry in HW FST
 * @fst: Pointer to the Rx Flow Search Table
 * @table_offset: offset into the table where the flow is to be setup
 * @flow: Flow Parameters
 *
 * Flow table entry fields are updated in host byte order, little endian order.
 *
 * Return: pointer to the programmed entry, or NULL when the offset is out
 *	   of range or the entry is already valid
 */
void *
hal_rx_flow_setup_fse(struct hal_rx_fst *fst, uint32_t table_offset,
		      struct hal_rx_flow *flow)
{
	uint8_t *fse;
	bool fse_valid;

	/* bounds-check the caller-supplied slot index */
	if (table_offset >= fst->max_entries) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "HAL FSE table offset %u exceeds max entries %u",
			  table_offset, fst->max_entries);
		return NULL;
	}

	fse = (uint8_t *)fst->base_vaddr +
		(table_offset * HAL_RX_FST_ENTRY_SIZE);

	/* never overwrite an entry that is still marked valid */
	fse_valid = HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);

	if (fse_valid) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "HAL FSE %pK already valid", fse);
		return NULL;
	}

	/* 128-bit source IP (words 0-3), host byte order (no htonl here,
	 * unlike the RX_FLOW_TAG variant above)
	 */
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96,
			       (flow->tuple_info.src_ip_127_96));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64,
			       (flow->tuple_info.src_ip_95_64));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32,
			       (flow->tuple_info.src_ip_63_32));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0,
			       (flow->tuple_info.src_ip_31_0));

	/* 128-bit destination IP (words 4-7), host byte order */
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96,
			       (flow->tuple_info.dest_ip_127_96));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64,
			       (flow->tuple_info.dest_ip_95_64));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32,
			       (flow->tuple_info.dest_ip_63_32));

	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0,
			       (flow->tuple_info.dest_ip_31_0));

	/* ports and protocol are sub-word fields: clear then OR in */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, DEST_PORT,
			       (flow->tuple_info.dest_port));

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, SRC_PORT,
			       (flow->tuple_info.src_port));

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL,
			       flow->tuple_info.l4_protocol);

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER,
			       flow->reo_destination_handler);

	/* set the valid bit */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, VALID, 1);

	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA) =
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_10, METADATA,
			       (flow->fse_metadata));

	/* REO destination indication lives in word 9 for this layout
	 * (word 11 in the RX_FLOW_TAG layout)
	 */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_INDICATION);
	HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_INDICATION) |=
		HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9,
			       REO_DESTINATION_INDICATION,
			       flow->reo_destination_indication);

	/* Reset all the other fields in FSE */
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, RESERVED_9);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, MSDU_DROP);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_COUNT);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_12, MSDU_BYTE_COUNT);
	HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_13, TIMESTAMP);

	return fse;
}
#endif /* WLAN_SUPPORT_RX_FISA */
qdf_export_symbol(hal_rx_flow_setup_fse);
/**
 * hal_rx_flow_delete_entry() - Invalidate a flow entry in the Rx FST
 * @fst: Pointer to the Rx Flow Search Table
 * @hal_rx_fse: Pointer to the flow entry that is to be invalidated
 *
 * Return: QDF_STATUS_SUCCESS when the entry was valid and is now cleared,
 *         QDF_STATUS_E_NOENT when the entry was not valid to begin with
 */
inline QDF_STATUS
hal_rx_flow_delete_entry(struct hal_rx_fst *fst, void *hal_rx_fse)
{
	uint8_t *fse = (uint8_t *)hal_rx_fse;

	/* Deleting a flow only requires clearing its VALID bit in HW */
	if (HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID)) {
		HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_NOENT;
}
qdf_export_symbol(hal_rx_flow_delete_entry);
/**
 * hal_rx_fst_key_configure() - Derive the shifted Toeplitz key for the FST
 * @fst: Pointer to the Rx Flow Search Table (reads fst->key, writes
 *       fst->shifted_key)
 *
 * Return: None
 */
static void hal_rx_fst_key_configure(struct hal_rx_fst *fst)
{
	uint8_t scratch_key[HAL_FST_HASH_KEY_SIZE_BYTES];

	/*
	 * The Toeplitz algorithm as per the Microsoft spec works in a
	 * "big-endian" manner: the MSBs of the key hash the initial input
	 * bytes and progressively lower-order key bits hash the later
	 * input bytes, down to the LSBs for the final bytes.
	 *
	 * Pre-shift the 320-bit key by 5 bits and store a byte-reversed
	 * copy in fst->shifted_key so the hash computation can consume it
	 * directly.
	 */
	qdf_mem_copy(scratch_key, fst->key, HAL_FST_HASH_KEY_SIZE_BYTES);
	key_bitwise_shift_left(scratch_key, HAL_FST_HASH_KEY_SIZE_BYTES, 5);
	key_reverse(fst->shifted_key, scratch_key, HAL_FST_HASH_KEY_SIZE_BYTES);
}
/**
 * hal_rx_fst_get_base() - Get the virtual base address of the Rx FST
 * @fst: Pointer to the Rx Flow Search Table
 *
 * Return: virtual base address of the FST entry array
 */
static inline void *hal_rx_fst_get_base(struct hal_rx_fst *fst)
{
	return fst->base_vaddr;
}
/**
 * hal_rx_fst_get_fse_size() - Get the size of one flow entry in the Rx FST
 *
 * Return: size in bytes of a single FST entry
 */
static inline uint32_t hal_rx_fst_get_fse_size(void)
{
	return HAL_RX_FST_ENTRY_SIZE;
}
/**
 * hal_rx_flow_get_tuple_info() - Read back the 5-tuple of a valid FST entry
 * @hal_fse: Pointer to the flow entry in the Rx FST
 * @tuple_info: Filled with the entry's 5-tuple on success
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on NULL
 *         arguments, QDF_STATUS_E_NOENT when the entry is not valid
 */
QDF_STATUS hal_rx_flow_get_tuple_info(void *hal_fse,
				      struct hal_flow_tuple_info *tuple_info)
{
	if (!hal_fse || !tuple_info)
		return QDF_STATUS_E_INVAL;

	if (!HAL_GET_FLD(hal_fse, RX_FLOW_SEARCH_ENTRY_9, VALID))
		return QDF_STATUS_E_NOENT;

	/* IP address words are stored in network order; convert to host */
	tuple_info->src_ip_127_96 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96));
	tuple_info->src_ip_95_64 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64));
	tuple_info->src_ip_63_32 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32));
	tuple_info->src_ip_31_0 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0));
	tuple_info->dest_ip_127_96 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96));
	tuple_info->dest_ip_95_64 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64));
	tuple_info->dest_ip_63_32 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32));
	tuple_info->dest_ip_31_0 = qdf_ntohl(HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0));
	/* Ports and protocol are read as programmed, no byte-order swap */
	tuple_info->dest_port = HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_8, DEST_PORT);
	tuple_info->src_port = HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_8, SRC_PORT);
	tuple_info->l4_protocol = HAL_GET_FLD(hal_fse,
				RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL);

	return QDF_STATUS_SUCCESS;
}
/**
 * hal_flow_toeplitz_create_cache() - Precompute per-byte Toeplitz hash
 * contributions from the shifted key stored in the FST
 * @fst: FST Handle (fst->shifted_key is read, fst->key_cache is filled)
 *
 * Fills fst->key_cache[i][val] with the 32-bit hash contribution of input
 * byte position i taking value val, so the runtime hash reduces to an XOR
 * of table lookups (see hal_flow_toeplitz_hash).
 *
 * Return: None
 */
static void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst)
{
	int bit;
	int val;
	int i;
	uint8_t *key = fst->shifted_key;
	/*
	 * Initialise to first 32 bits of the key; shift in further key material
	 * through the loop
	 */
	uint32_t cur_key = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) |
		key[3];
	for (i = 0; i < HAL_FST_HASH_KEY_SIZE_BYTES; i++) {
		uint8_t new_key_byte;
		uint32_t shifted_key[8];
		/* Past the end of the key, pad with zero bits */
		if (i + 4 < HAL_FST_HASH_KEY_SIZE_BYTES)
			new_key_byte = key[i + 4];
		else
			new_key_byte = 0;
		shifted_key[0] = cur_key;
		for (bit = 1; bit < 8; bit++) {
			/*
			 * For each iteration, shift out one more bit of the
			 * current key and shift in one more bit of the new key
			 * material
			 */
			shifted_key[bit] = cur_key << bit |
				new_key_byte >> (8 - bit);
		}
		for (val = 0; val < (1 << 8); val++) {
			uint32_t hash = 0;
			int mask;
			/*
			 * For each bit set in the input, XOR in
			 * the appropriately shifted key
			 */
			for (bit = 0, mask = 1 << 7; bit < 8; bit++, mask >>= 1)
				if ((val & mask))
					hash ^= shifted_key[bit];
			fst->key_cache[i][val] = hash;
		}
		/* Consume one more key byte for the next input position */
		cur_key = cur_key << 8 | new_key_byte;
	}
}
/**
 * hal_rx_fst_attach() - Allocate and initialize the Rx flow search table
 *
 * @qdf_dev: QDF device handle
 * @hal_fst_base_paddr: Out param: physical base address of the Rx FST
 * @max_entries: Max number of flows allowed in the FST; assumed to be a
 *               power of two since hash_mask is derived as max_entries - 1
 *               (NOTE(review): confirm callers guarantee this)
 * @max_search: Number of collisions (skid length) allowed in the hash FST
 * @hash_key: Toeplitz key used for the hash FST
 *
 * Return: pointer to the allocated FST handle, or NULL on allocation failure
 */
struct hal_rx_fst *
hal_rx_fst_attach(qdf_device_t qdf_dev,
		  uint64_t *hal_fst_base_paddr, uint16_t max_entries,
		  uint16_t max_search, uint8_t *hash_key)
{
	struct hal_rx_fst *fst = qdf_mem_malloc(sizeof(struct hal_rx_fst));

	if (!fst) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("hal fst allocation failed"));
		return NULL;
	}

	qdf_mem_set(fst, 0, sizeof(struct hal_rx_fst));

	fst->key = hash_key;
	fst->max_skid_length = max_search;
	fst->max_entries = max_entries;
	fst->hash_mask = max_entries - 1;

	/* %pK is the correct conversion for a pointer; %x on a pointer is
	 * a format/argument mismatch (undefined behavior per C11 7.21.6.1)
	 */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "HAL FST allocation %pK %d * %d\n", fst,
		  fst->max_entries, HAL_RX_FST_ENTRY_SIZE);

	fst->base_vaddr = (uint8_t *)qdf_mem_alloc_consistent(qdf_dev,
				qdf_dev->dev,
				(fst->max_entries * HAL_RX_FST_ENTRY_SIZE),
				&fst->base_paddr);

	/* base_paddr is a DMA address that may be wider than int; widen
	 * explicitly so the format specifier matches the argument
	 */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "hal_rx_fst base address 0x%llx",
		  (unsigned long long)fst->base_paddr);
	if (!fst->base_vaddr) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("hal fst->base_vaddr allocation failed"));
		qdf_mem_free(fst);
		return NULL;
	}
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
			   (void *)fst->key, HAL_FST_HASH_KEY_SIZE_BYTES);

	qdf_mem_set((uint8_t *)fst->base_vaddr, 0,
		    (fst->max_entries * HAL_RX_FST_ENTRY_SIZE));

	/* Derive the shifted key and the per-byte Toeplitz hash cache */
	hal_rx_fst_key_configure(fst);
	hal_flow_toeplitz_create_cache(fst);
	*hal_fst_base_paddr = (uint64_t)fst->base_paddr;
	return fst;
}
qdf_export_symbol(hal_rx_fst_attach);
/**
 * hal_rx_fst_detach() - Tear down the Rx flow search table
 *
 * @rx_fst: Pointer to the Rx FST to free; NULL is tolerated
 * @qdf_dev: QDF device handle used for the consistent allocation
 *
 * Return: None
 */
void hal_rx_fst_detach(struct hal_rx_fst *rx_fst,
		       qdf_device_t qdf_dev)
{
	uint32_t table_size;

	if (!rx_fst || !qdf_dev)
		return;

	/* Release the DMA-coherent entry array, then the FST handle */
	table_size = rx_fst->max_entries * HAL_RX_FST_ENTRY_SIZE;
	qdf_mem_free_consistent(qdf_dev, qdf_dev->dev, table_size,
				rx_fst->base_vaddr, rx_fst->base_paddr, 0);
	qdf_mem_free(rx_fst);
}
qdf_export_symbol(hal_rx_fst_detach);
/**
 * hal_flow_toeplitz_hash() - Calculate the Toeplitz hash of a flow tuple
 * using the per-byte key cache built by hal_flow_toeplitz_create_cache()
 *
 * @hal_fst: FST Handle
 * @flow: Flow Parameters
 *
 * Return: hash index for the flow, truncated to the FST table size
 */
uint32_t
hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow)
{
	int i, j;
	uint32_t hash = 0;
	struct hal_rx_fst *fst = (struct hal_rx_fst *)hal_fst;
	uint32_t input[HAL_FST_HASH_KEY_SIZE_WORDS];
	uint8_t *tuple;

	/* sizeof(input) is bounded by the array itself, unlike a
	 * byte-count macro, so the zeroing can never overrun input[]
	 */
	qdf_mem_zero(input, sizeof(input));

	/* input[] is already uint32_t; the former *(uint32_t *)&input[i]
	 * casts were redundant and have been dropped
	 */
	input[0] = qdf_htonl(flow->tuple_info.src_ip_127_96);
	input[1] = qdf_htonl(flow->tuple_info.src_ip_95_64);
	input[2] = qdf_htonl(flow->tuple_info.src_ip_63_32);
	input[3] = qdf_htonl(flow->tuple_info.src_ip_31_0);
	input[4] = qdf_htonl(flow->tuple_info.dest_ip_127_96);
	input[5] = qdf_htonl(flow->tuple_info.dest_ip_95_64);
	input[6] = qdf_htonl(flow->tuple_info.dest_ip_63_32);
	input[7] = qdf_htonl(flow->tuple_info.dest_ip_31_0);
	input[8] = (flow->tuple_info.dest_port << 16) |
		(flow->tuple_info.src_port);
	input[9] = flow->tuple_info.l4_protocol;

	tuple = (uint8_t *)input;
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   tuple, sizeof(input));

	/* XOR the cached per-byte hash contributions over the input bytes */
	for (i = 0, j = HAL_FST_HASH_DATA_SIZE - 1;
	     i < HAL_FST_HASH_KEY_SIZE_BYTES && j >= 0; i++, j--) {
		hash ^= fst->key_cache[i][tuple[j]];
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "Hash value %u %u truncated hash %u\n", hash,
		  (hash >> 12), (hash >> 12) % (fst->max_entries));

	/* Keep the upper 20 bits, masked to the table size; the mask
	 * assumes max_entries is a power of two
	 */
	hash >>= 12;
	hash &= (fst->max_entries - 1);

	return hash;
}
qdf_export_symbol(hal_flow_toeplitz_hash);
/**
 * hal_rx_get_hal_hash() - Truncate a flow hash to the FST table size
 *
 * @hal_fst: HAL Rx FST Handle
 * @flow_hash: Flow hash computed from the flow tuple
 *
 * Return: hash index truncated to the size of the hash table
 */
uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash)
{
	/* Wrap hashes that fall beyond the table back into range */
	if (flow_hash >= hal_fst->max_entries)
		return flow_hash & hal_fst->hash_mask;

	return flow_hash;
}
qdf_export_symbol(hal_rx_get_hal_hash);
/**
* hal_rx_insert_flow_entry() - Add a flow into the FST table
*
* @hal_fst: HAL Rx FST Handle
* @flow_hash: Flow hash computed from flow tuple
* @flow_tuple_info: Flow tuple used to compute the hash
* @flow_index: Hash index of the flow in the table when inserted successfully
*
* Return: Success if flow is inserted into the table, error otherwise
*/
QDF_STATUS
hal_rx_insert_flow_entry(struct hal_rx_fst *fst, uint32_t flow_hash,
void *flow_tuple_info, uint32_t *flow_idx)
{
int i;
void *hal_fse;
uint32_t hal_hash;
struct hal_flow_tuple_info hal_tuple_info = { 0 };
QDF_STATUS status;
for (i = 0; i < fst->max_skid_length; i++) {
hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i));
hal_fse = (uint8_t *)fst->base_vaddr +
(hal_hash * HAL_RX_FST_ENTRY_SIZE);
status = hal_rx_flow_get_tuple_info(hal_fse, &hal_tuple_info);
if (status == QDF_STATUS_E_NOENT)
break;
/* Find the matching flow entry in HW FST */
if (!qdf_mem_cmp(&hal_tuple_info,
flow_tuple_info,
sizeof(struct hal_flow_tuple_info))) {
dp_err("Duplicate flow entry in FST %u at skid %u ",
hal_hash, i);
return QDF_STATUS_E_EXISTS;
}
}
if (i == fst->max_skid_length) {
dp_err("Max skid length reached for hash %u", flow_hash);
return QDF_STATUS_E_RANGE;
}
*flow_idx = hal_hash;
dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d",
flow_hash, i, hal_fse, *flow_idx);
return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_rx_insert_flow_entry);
/**
* hal_rx_find_flow_from_tuple() - Find a flow in the FST table
*
* @fst: HAL Rx FST Handle
* @flow_hash: Flow hash computed from flow tuple
* @flow_tuple_info: Flow tuple used to compute the hash
* @flow_index: Hash index of the flow in the table when found
*
* Return: Success if matching flow is found in the table, error otherwise
*/
QDF_STATUS
hal_rx_find_flow_from_tuple(struct hal_rx_fst *fst, uint32_t flow_hash,
void *flow_tuple_info, uint32_t *flow_idx)
{
int i;
void *hal_fse;
uint32_t hal_hash;
struct hal_flow_tuple_info hal_tuple_info = { 0 };
QDF_STATUS status;
for (i = 0; i < fst->max_skid_length; i++) {
hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i));
hal_fse = (uint8_t *)fst->base_vaddr +
(hal_hash * HAL_RX_FST_ENTRY_SIZE);
status = hal_rx_flow_get_tuple_info(hal_fse, &hal_tuple_info);
if (status != QDF_STATUS_SUCCESS)
continue;
/* Find the matching flow entry in HW FST */
if (!qdf_mem_cmp(&hal_tuple_info,
flow_tuple_info,
sizeof(struct hal_flow_tuple_info))) {
break;
}
}
if (i == fst->max_skid_length) {
dp_err("Max skid length reached for hash %u", flow_hash);
return QDF_STATUS_E_RANGE;
}
*flow_idx = hal_hash;
dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d",
flow_hash, i, hal_fse, *flow_idx);
return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hal_rx_find_flow_from_tuple);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -99,6 +99,8 @@ struct hal_rx_fst {
uint16_t max_skid_length;
uint16_t hash_mask;
uint32_t key_cache[HAL_FST_HASH_KEY_SIZE_BYTES][1 << 8];
uint32_t add_flow_count;
uint32_t del_flow_count;
};
/**
@@ -109,108 +111,9 @@ struct hal_rx_fst {
*
* Return: Success/Failure
*/
static void *
hal_rx_flow_setup_fse(struct hal_rx_fst *fst, uint32_t table_offset,
struct hal_rx_flow *flow)
{
uint8_t *fse;
bool fse_valid;
if (table_offset >= fst->max_entries) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"HAL FSE table offset %u exceeds max entries %u",
table_offset, fst->max_entries);
return NULL;
}
fse = (uint8_t *)fst->base_vaddr +
(table_offset * HAL_RX_FST_ENTRY_SIZE);
fse_valid = HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);
if (fse_valid) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"HAL FSE %pK already valid", fse);
return NULL;
}
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96,
qdf_htonl(flow->tuple_info.src_ip_127_96));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64,
qdf_htonl(flow->tuple_info.src_ip_95_64));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32,
qdf_htonl(flow->tuple_info.src_ip_63_32));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0,
qdf_htonl(flow->tuple_info.src_ip_31_0));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96,
qdf_htonl(flow->tuple_info.dest_ip_127_96));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64,
qdf_htonl(flow->tuple_info.dest_ip_95_64));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32,
qdf_htonl(flow->tuple_info.dest_ip_63_32));
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0,
qdf_htonl(flow->tuple_info.dest_ip_31_0));
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, DEST_PORT) |=
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, DEST_PORT,
(flow->tuple_info.dest_port));
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_8, SRC_PORT) |=
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_8, SRC_PORT,
(flow->tuple_info.src_port));
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL) |=
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL,
flow->tuple_info.l4_protocol);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER) |=
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, REO_DESTINATION_HANDLER,
flow->reo_destination_handler);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID) |=
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_9, VALID, 1);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_10, METADATA) =
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_10, METADATA,
flow->fse_metadata);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, REO_DESTINATION_INDICATION);
HAL_SET_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, REO_DESTINATION_INDICATION) |=
HAL_SET_FLD_SM(RX_FLOW_SEARCH_ENTRY_11,
REO_DESTINATION_INDICATION,
flow->reo_destination_indication);
/* Reset all the other fields in FSE */
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, RESERVED_9);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_DROP);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, RESERVED_11);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_11, MSDU_COUNT);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_12, MSDU_BYTE_COUNT);
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_13, TIMESTAMP);
return fse;
}
void *hal_rx_flow_setup_fse(struct hal_rx_fst *fst,
uint32_t table_offset,
struct hal_rx_flow *flow);
/**
* hal_rx_flow_delete_entry() - Delete a flow from the Rx Flow Search Table
@@ -219,64 +122,8 @@ hal_rx_flow_setup_fse(struct hal_rx_fst *fst, uint32_t table_offset,
*
* Return: Success/Failure
*/
static inline QDF_STATUS
hal_rx_flow_delete_entry(struct hal_rx_fst *fst, void *hal_rx_fse)
{
uint8_t *fse = (uint8_t *)hal_rx_fse;
if (!HAL_GET_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID))
return QDF_STATUS_E_NOENT;
HAL_CLR_FLD(fse, RX_FLOW_SEARCH_ENTRY_9, VALID);
return QDF_STATUS_SUCCESS;
}
/**
* hal_rx_fst_key_configure() - Configure the Toeplitz key in the FST
* @fst: Pointer to the Rx Flow Search Table
*
* Return: Success/Failure
*/
static void hal_rx_fst_key_configure(struct hal_rx_fst *fst)
{
uint8_t key_bytes[HAL_FST_HASH_KEY_SIZE_BYTES];
qdf_mem_copy(key_bytes, fst->key, HAL_FST_HASH_KEY_SIZE_BYTES);
/**
* The Toeplitz algorithm as per the Microsoft spec works in a
* “big-endian” manner, using the MSBs of the key to hash the
* initial bytes of the input going on to use up the lower order bits
* of the key to hash further bytes of the input until the LSBs of the
* key are used finally.
*
* So first, rightshift 320-bit input key 5 times to get 315 MS bits
*/
key_bitwise_shift_left(key_bytes, HAL_FST_HASH_KEY_SIZE_BYTES, 5);
key_reverse(fst->shifted_key, key_bytes, HAL_FST_HASH_KEY_SIZE_BYTES);
}
/**
* hal_rx_fst_get_base() - Retrieve the virtual base address of the Rx FST
* @fst: Pointer to the Rx Flow Search Table
*
* Return: Success/Failure
*/
static inline void *hal_rx_fst_get_base(struct hal_rx_fst *fst)
{
return fst->base_vaddr;
}
/**
* hal_rx_fst_get_fse_size() - Retrieve the size of each entry(flow) in Rx FST
*
* Return: size of each entry/flow in Rx FST
*/
static inline uint32_t hal_rx_fst_get_fse_size(void)
{
return HAL_RX_FST_ENTRY_SIZE;
}
QDF_STATUS
hal_rx_flow_delete_entry(struct hal_rx_fst *fst, void *hal_rx_fse);
/**
* hal_rx_flow_get_tuple_info() - Retrieve the 5-tuple flow info for an entry
@@ -286,103 +133,7 @@ static inline uint32_t hal_rx_fst_get_fse_size(void)
* Return: Success/Failure
*/
QDF_STATUS hal_rx_flow_get_tuple_info(void *hal_fse,
struct hal_flow_tuple_info *tuple_info)
{
if (!hal_fse || !tuple_info)
return QDF_STATUS_E_INVAL;
if (!HAL_GET_FLD(hal_fse, RX_FLOW_SEARCH_ENTRY_9, VALID))
return QDF_STATUS_E_NOENT;
tuple_info->src_ip_127_96 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_0, SRC_IP_127_96));
tuple_info->src_ip_95_64 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_1, SRC_IP_95_64));
tuple_info->src_ip_63_32 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_2, SRC_IP_63_32));
tuple_info->src_ip_31_0 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_3, SRC_IP_31_0));
tuple_info->dest_ip_127_96 =
qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_4, DEST_IP_127_96));
tuple_info->dest_ip_95_64 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_5, DEST_IP_95_64));
tuple_info->dest_ip_63_32 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_6, DEST_IP_63_32));
tuple_info->dest_ip_31_0 = qdf_ntohl(HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_7, DEST_IP_31_0));
tuple_info->dest_port = (HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_8, DEST_PORT));
tuple_info->src_port = (HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_8, SRC_PORT));
tuple_info->l4_protocol = HAL_GET_FLD(hal_fse,
RX_FLOW_SEARCH_ENTRY_9, L4_PROTOCOL);
return QDF_STATUS_SUCCESS;
}
/**
* hal_flow_toeplitz_create_cache() - Calculate hashes for each possible
* byte value with the key taken as is
*
* @fst: FST Handle
* @key: Hash Key
*
* Return: Success/Failure
*/
void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst)
{
int bit;
int val;
int i;
uint8_t *key = fst->shifted_key;
/*
* Initialise to first 32 bits of the key; shift in further key material
* through the loop
*/
uint32_t cur_key = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) |
key[3];
for (i = 0; i < HAL_FST_HASH_KEY_SIZE_BYTES; i++) {
uint8_t new_key_byte;
uint32_t shifted_key[8];
if (i + 4 < HAL_FST_HASH_KEY_SIZE_BYTES)
new_key_byte = key[i + 4];
else
new_key_byte = 0;
shifted_key[0] = cur_key;
for (bit = 1; bit < 8; bit++) {
/*
* For each iteration, shift out one more bit of the
* current key and shift in one more bit of the new key
* material
*/
shifted_key[bit] = cur_key << bit |
new_key_byte >> (8 - bit);
}
for (val = 0; val < (1 << 8); val++) {
uint32_t hash = 0;
int mask;
/*
* For each bit set in the input, XOR in
* the appropriately shifted key
*/
for (bit = 0, mask = 1 << 7; bit < 8; bit++, mask >>= 1)
if ((val & mask))
hash ^= shifted_key[bit];
fst->key_cache[i][val] = hash;
}
cur_key = cur_key << 8 | new_key_byte;
}
}
struct hal_flow_tuple_info *tuple_info);
/**
* hal_rx_fst_attach() - Initialize Rx flow search table in HW FST
@@ -395,52 +146,10 @@ void hal_flow_toeplitz_create_cache(struct hal_rx_fst *fst)
*
* Return:
*/
static struct hal_rx_fst *
struct hal_rx_fst *
hal_rx_fst_attach(qdf_device_t qdf_dev,
uint64_t *hal_fst_base_paddr, uint16_t max_entries,
uint16_t max_search, uint8_t *hash_key)
{
struct hal_rx_fst *fst = qdf_mem_malloc(sizeof(struct hal_rx_fst));
if (!fst) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("hal fst allocation failed,"));
return NULL;
}
qdf_mem_set(fst, 0, sizeof(struct hal_rx_fst));
fst->key = hash_key;
fst->max_skid_length = max_search;
fst->max_entries = max_entries;
fst->hash_mask = max_entries - 1;
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"HAL FST allocation %x %d * %d\n", fst,
fst->max_entries, HAL_RX_FST_ENTRY_SIZE);
fst->base_vaddr = (uint8_t *)qdf_mem_alloc_consistent(qdf_dev,
qdf_dev->dev,
(fst->max_entries * HAL_RX_FST_ENTRY_SIZE),
&fst->base_paddr);
if (!fst->base_vaddr) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
FL("hal fst->base_vaddr allocation failed"));
qdf_mem_free(fst);
return NULL;
}
QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
(void *)fst->key, HAL_FST_HASH_KEY_SIZE_BYTES);
qdf_mem_set((uint8_t *)fst->base_vaddr, 0,
(fst->max_entries * HAL_RX_FST_ENTRY_SIZE));
hal_rx_fst_key_configure(fst);
hal_flow_toeplitz_create_cache(fst);
*hal_fst_base_paddr = (uint64_t)fst->base_paddr;
return fst;
}
uint16_t max_search, uint8_t *hash_key);
/**
* hal_rx_fst_detach() - De-init the Rx flow search table from HW
@@ -450,85 +159,7 @@ hal_rx_fst_attach(qdf_device_t qdf_dev,
*
* Return:
*/
void hal_rx_fst_detach(struct hal_rx_fst *rx_fst,
qdf_device_t qdf_dev)
{
if (!rx_fst || !qdf_dev)
return;
qdf_mem_free_consistent(qdf_dev, qdf_dev->dev,
rx_fst->max_entries * HAL_RX_FST_ENTRY_SIZE,
rx_fst->base_vaddr, rx_fst->base_paddr, 0);
qdf_mem_free(rx_fst);
}
/**
* hal_flow_toeplitz_hash() - Calculate Toeplitz hash by using the cached key
*
* @hal_fst: FST Handle
* @flow: Flow Parameters
*
* Return: Success/Failure
*/
static inline uint32_t
hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow)
{
int i, j;
uint32_t hash = 0;
struct hal_rx_fst *fst = (struct hal_rx_fst *)hal_fst;
uint32_t input[HAL_FST_HASH_KEY_SIZE_WORDS];
uint8_t *tuple;
qdf_mem_zero(input, HAL_FST_HASH_KEY_SIZE_BYTES);
*(uint32_t *)&input[0] = qdf_htonl(flow->tuple_info.src_ip_127_96);
*(uint32_t *)&input[1] = qdf_htonl(flow->tuple_info.src_ip_95_64);
*(uint32_t *)&input[2] = qdf_htonl(flow->tuple_info.src_ip_63_32);
*(uint32_t *)&input[3] = qdf_htonl(flow->tuple_info.src_ip_31_0);
*(uint32_t *)&input[4] = qdf_htonl(flow->tuple_info.dest_ip_127_96);
*(uint32_t *)&input[5] = qdf_htonl(flow->tuple_info.dest_ip_95_64);
*(uint32_t *)&input[6] = qdf_htonl(flow->tuple_info.dest_ip_63_32);
*(uint32_t *)&input[7] = qdf_htonl(flow->tuple_info.dest_ip_31_0);
*(uint32_t *)&input[8] = (flow->tuple_info.dest_port << 16) |
(flow->tuple_info.src_port);
*(uint32_t *)&input[9] = flow->tuple_info.l4_protocol;
tuple = (uint8_t *)input;
QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
tuple, sizeof(input));
for (i = 0, j = HAL_FST_HASH_DATA_SIZE - 1;
i < HAL_FST_HASH_KEY_SIZE_BYTES && j >= 0; i++, j--) {
hash ^= fst->key_cache[i][tuple[j]];
}
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
"Hash value %u %u truncated hash %u\n", hash,
(hash >> 12), (hash >> 12) % (fst->max_entries));
hash >>= 12;
hash &= (fst->max_entries - 1);
return hash;
}
/**
* hal_rx_get_hal_hash() - Retrieve hash index of a flow in the FST table
*
* @hal_fst: HAL Rx FST Handle
* @flow_hash: Flow hash computed from flow tuple
*
* Return: hash index truncated to the size of the hash table
*/
inline
uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash)
{
uint32_t trunc_hash = flow_hash;
/* Take care of hash wrap around scenario */
if (flow_hash >= hal_fst->max_entries)
trunc_hash &= hal_fst->hash_mask;
return trunc_hash;
}
void hal_rx_fst_detach(struct hal_rx_fst *rx_fst, qdf_device_t qdf_dev);
/**
* hal_rx_insert_flow_entry() - Add a flow into the FST table
@@ -542,40 +173,7 @@ uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash)
*/
QDF_STATUS
hal_rx_insert_flow_entry(struct hal_rx_fst *fst, uint32_t flow_hash,
void *flow_tuple_info, uint32_t *flow_idx) {
int i;
void *hal_fse;
uint32_t hal_hash;
struct hal_flow_tuple_info hal_tuple_info = { 0 };
QDF_STATUS status;
for (i = 0; i < fst->max_skid_length; i++) {
hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i));
hal_fse = (uint8_t *)fst->base_vaddr +
(hal_hash * HAL_RX_FST_ENTRY_SIZE);
status = hal_rx_flow_get_tuple_info(hal_fse, &hal_tuple_info);
if (QDF_STATUS_E_NOENT == status)
break;
/* Find the matching flow entry in HW FST */
if (!qdf_mem_cmp(&hal_tuple_info,
flow_tuple_info,
sizeof(struct hal_flow_tuple_info))) {
dp_err("Duplicate flow entry in FST %u at skid %u ",
hal_hash, i);
return QDF_STATUS_E_EXISTS;
}
}
if (i == fst->max_skid_length) {
dp_err("Max skid length reached for hash %u", flow_hash);
return QDF_STATUS_E_RANGE;
}
*flow_idx = hal_hash;
dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d",
flow_hash, i, hal_fse, *flow_idx);
return QDF_STATUS_SUCCESS;
}
void *flow_tuple_info, uint32_t *flow_idx);
/**
* hal_rx_find_flow_from_tuple() - Find a flow in the FST table
@@ -589,40 +187,28 @@ hal_rx_insert_flow_entry(struct hal_rx_fst *fst, uint32_t flow_hash,
*/
QDF_STATUS
hal_rx_find_flow_from_tuple(struct hal_rx_fst *fst, uint32_t flow_hash,
void *flow_tuple_info, uint32_t *flow_idx)
{
int i;
void *hal_fse;
uint32_t hal_hash;
struct hal_flow_tuple_info hal_tuple_info = { 0 };
QDF_STATUS status;
void *flow_tuple_info, uint32_t *flow_idx);
for (i = 0; i < fst->max_skid_length; i++) {
hal_hash = hal_rx_get_hal_hash(fst, (flow_hash + i));
hal_fse = (uint8_t *)fst->base_vaddr +
(hal_hash * HAL_RX_FST_ENTRY_SIZE);
status = hal_rx_flow_get_tuple_info(hal_fse, &hal_tuple_info);
if (QDF_STATUS_SUCCESS != status)
continue;
/**
* hal_rx_get_hal_hash() - Retrieve hash index of a flow in the FST table
*
* @hal_fst: HAL Rx FST Handle
* @flow_hash: Flow hash computed from flow tuple
*
* Return: hash index truncated to the size of the hash table
*/
uint32_t hal_rx_get_hal_hash(struct hal_rx_fst *hal_fst, uint32_t flow_hash);
/* Find the matching flow entry in HW FST */
if (!qdf_mem_cmp(&hal_tuple_info,
flow_tuple_info,
sizeof(struct hal_flow_tuple_info))) {
break;
}
}
if (i == fst->max_skid_length) {
dp_err("Max skid length reached for hash %u", flow_hash);
return QDF_STATUS_E_RANGE;
}
*flow_idx = hal_hash;
dp_info("flow_hash = %u, skid_entry = %d, flow_addr = %pK flow_idx = %d",
flow_hash, i, hal_fse, *flow_idx);
return QDF_STATUS_SUCCESS;
}
/**
* hal_flow_toeplitz_hash() - Calculate Toeplitz hash by using the cached key
*
* @hal_fst: FST Handle
* @flow: Flow Parameters
*
* Return: Success/Failure
*/
uint32_t
hal_flow_toeplitz_hash(void *hal_fst, struct hal_rx_flow *flow);
void hal_rx_dump_fse_table(struct hal_rx_fst *fst);
#endif /* HAL_RX_FLOW_H */

View File

@@ -1081,6 +1081,12 @@ struct hal_hw_txrx_ops qca6290_hal_hw_txrx_ops = {
NULL,
/* rx - msdu end fast path info fields */
hal_rx_msdu_packet_metadata_get_generic,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
};
struct hal_hw_srng_config hw_srng_table_6290[] = {

View File

@@ -1077,6 +1077,12 @@ struct hal_hw_txrx_ops qca6390_hal_hw_txrx_ops = {
NULL,
/* rx - msdu end fast path info fields */
hal_rx_msdu_packet_metadata_get_generic,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
};
struct hal_hw_srng_config hw_srng_table_6390[] = {

View File

@@ -316,7 +316,7 @@ static void hal_rx_dump_msdu_end_tlv_6490(void *msduend,
struct rx_msdu_end *msdu_end = (struct rx_msdu_end *)msduend;
QDF_TRACE(QDF_MODULE_ID_DP, dbg_level,
"rx_msdu_end tlv (1/2) - "
"rx_msdu_end tlv (1/3) - "
"rxpcu_mpdu_filter_in_category: %x "
"sw_frame_group_id: %x "
"phy_ppdu_id: %x "
@@ -363,7 +363,7 @@ static void hal_rx_dump_msdu_end_tlv_6490(void *msduend,
msdu_end->amsdu_parser_error);
QDF_TRACE(QDF_MODULE_ID_DP, dbg_level,
"rx_msdu_end tlv (2/2)- "
"rx_msdu_end tlv (2/3)- "
"sa_is_valid: %x "
"da_is_valid: %x "
"da_is_mcbc: %x "
@@ -412,6 +412,18 @@ static void hal_rx_dump_msdu_end_tlv_6490(void *msduend,
msdu_end->fse_metadata,
msdu_end->cce_metadata,
msdu_end->sa_sw_peer_id);
QDF_TRACE(QDF_MODULE_ID_DP, dbg_level,
"rx_msdu_end tlv (3/3)"
"aggregation_count %x "
"flow_aggregation_continuation %x "
"fisa_timeout %x "
"cumulative_l4_checksum %x "
"cumulative_ip_length %x",
msdu_end->aggregation_count,
msdu_end->flow_aggregation_continuation,
msdu_end->fisa_timeout,
msdu_end->cumulative_l4_checksum,
msdu_end->cumulative_ip_length);
}
/*
@@ -1258,6 +1270,30 @@ hal_rx_msdu_cce_metadata_get_6490(uint8_t *buf)
return HAL_RX_MSDU_END_CCE_METADATA_GET(msdu_end);
}
/**
* hal_rx_msdu_get_flow_params_6490: API to get flow index, flow index invalid
* and flow index timeout from rx_msdu_end TLV
* @buf: pointer to the start of RX PKT TLV headers
* @flow_invalid: pointer to return value of flow_idx_valid
* @flow_timeout: pointer to return value of flow_idx_timeout
* @flow_index: pointer to return value of flow_idx
*
* Return: none
*/
static inline void
hal_rx_msdu_get_flow_params_6490(uint8_t *buf,
bool *flow_invalid,
bool *flow_timeout,
uint32_t *flow_index)
{
struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
struct rx_msdu_end *msdu_end = &pkt_tlvs->msdu_end_tlv.rx_msdu_end;
*flow_invalid = HAL_RX_MSDU_END_FLOW_IDX_INVALID_GET(msdu_end);
*flow_timeout = HAL_RX_MSDU_END_FLOW_IDX_TIMEOUT_GET(msdu_end);
*flow_index = HAL_RX_MSDU_END_FLOW_IDX_GET(msdu_end);
}
/**
* hal_rx_tlv_get_tcp_chksum_6490() - API to get tcp checksum
* @buf: rx_tlv_hdr
@@ -1298,6 +1334,81 @@ static inline qdf_iomem_t hal_get_window_address_6490(struct hal_soc *hal_soc,
return addr;
}
/**
* hal_rx_get_fisa_cumulative_l4_checksum_6490() - Retrieve cumulative
* checksum
* @buf: buffer pointer
*
* Return: cumulative checksum
*/
static inline
uint16_t hal_rx_get_fisa_cumulative_l4_checksum_6490(uint8_t *buf)
{
return HAL_RX_TLV_GET_FISA_CUMULATIVE_L4_CHECKSUM(buf);
}
/**
* hal_rx_get_fisa_cumulative_ip_length_6490() - Retrieve cumulative
* ip length
* @buf: buffer pointer
*
* Return: cumulative length
*/
static inline
uint16_t hal_rx_get_fisa_cumulative_ip_length_6490(uint8_t *buf)
{
return HAL_RX_TLV_GET_FISA_CUMULATIVE_IP_LENGTH(buf);
}
/**
* hal_rx_get_udp_proto_6490() - Retrieve udp proto value
* @buf: buffer
*
* Return: udp proto bit
*/
static inline
bool hal_rx_get_udp_proto_6490(uint8_t *buf)
{
return HAL_RX_TLV_GET_UDP_PROTO(buf);
}
/**
* hal_rx_get_flow_agg_continuation_6490() - retrieve flow agg
* continuation
* @buf: buffer
*
* Return: flow agg
*/
static inline
bool hal_rx_get_flow_agg_continuation_6490(uint8_t *buf)
{
return HAL_RX_TLV_GET_FLOW_AGGR_CONT(buf);
}
/**
* hal_rx_get_flow_agg_count_6490()- Retrieve flow agg count
* @buf: buffer
*
* Return: flow agg count
*/
static inline
uint8_t hal_rx_get_flow_agg_count_6490(uint8_t *buf)
{
return HAL_RX_TLV_GET_FLOW_AGGR_COUNT(buf);
}
/**
* hal_rx_get_fisa_timeout_6490() - Retrieve fisa timeout
* @buf: buffer
*
* Return: fisa timeout
*/
static inline
bool hal_rx_get_fisa_timeout_6490(uint8_t *buf)
{
return HAL_RX_TLV_GET_FISA_TIMEOUT(buf);
}
struct hal_hw_txrx_ops qca6490_hal_hw_txrx_ops = {
/* init and setup */
hal_srng_dst_hw_init_generic,
@@ -1383,13 +1494,19 @@ struct hal_hw_txrx_ops qca6490_hal_hw_txrx_ops = {
hal_rx_msdu_flow_idx_timeout_6490,
hal_rx_msdu_fse_metadata_get_6490,
hal_rx_msdu_cce_metadata_get_6490,
NULL,
hal_rx_msdu_get_flow_params_6490,
hal_rx_tlv_get_tcp_chksum_6490,
hal_rx_get_rx_sequence_6490,
NULL,
NULL,
/* rx - msdu end fast path info fields */
hal_rx_msdu_packet_metadata_get_generic,
hal_rx_get_fisa_cumulative_l4_checksum_6490,
hal_rx_get_fisa_cumulative_ip_length_6490,
hal_rx_get_udp_proto_6490,
hal_rx_get_flow_agg_continuation_6490,
hal_rx_get_flow_agg_count_6490,
hal_rx_get_fisa_timeout_6490,
};
struct hal_hw_srng_config hw_srng_table_6490[] = {

View File

@@ -363,4 +363,43 @@ RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_DETAILS_RESERVED_0A_OFFSET))
RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_MASK, \
RX_MSDU_END_11_DA_IDX_OR_SW_PEER_ID_LSB))
/* Flow-aggregation continuation flag, extracted from word 17 of the
 * rx_msdu_end TLV inside the rx packet TLV area pointed to by @buf.
 */
#define HAL_RX_TLV_GET_FLOW_AGGR_CONT(buf) \
	(_HAL_MS( \
		 (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			 msdu_end_tlv.rx_msdu_end), \
			 RX_MSDU_END_17_FLOW_AGGREGATION_CONTINUATION_OFFSET)), \
		RX_MSDU_END_17_FLOW_AGGREGATION_CONTINUATION_MASK, \
		RX_MSDU_END_17_FLOW_AGGREGATION_CONTINUATION_LSB))

/* Aggregation count, extracted from word 17 of the rx_msdu_end TLV. */
#define HAL_RX_TLV_GET_FLOW_AGGR_COUNT(buf) \
	(_HAL_MS( \
		 (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			 msdu_end_tlv.rx_msdu_end), \
			 RX_MSDU_END_17_AGGREGATION_COUNT_OFFSET)), \
		RX_MSDU_END_17_AGGREGATION_COUNT_MASK, \
		RX_MSDU_END_17_AGGREGATION_COUNT_LSB))

/* FISA timeout bit, extracted from word 17 of the rx_msdu_end TLV. */
#define HAL_RX_TLV_GET_FISA_TIMEOUT(buf) \
	(_HAL_MS( \
		 (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			 msdu_end_tlv.rx_msdu_end), \
			 RX_MSDU_END_17_FISA_TIMEOUT_OFFSET)), \
		RX_MSDU_END_17_FISA_TIMEOUT_MASK, \
		RX_MSDU_END_17_FISA_TIMEOUT_LSB))

/* Cumulative L4 checksum, extracted from word 18 of the rx_msdu_end TLV. */
#define HAL_RX_TLV_GET_FISA_CUMULATIVE_L4_CHECKSUM(buf) \
	(_HAL_MS( \
		 (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			 msdu_end_tlv.rx_msdu_end), \
			 RX_MSDU_END_18_CUMULATIVE_L4_CHECKSUM_OFFSET)), \
		RX_MSDU_END_18_CUMULATIVE_L4_CHECKSUM_MASK, \
		RX_MSDU_END_18_CUMULATIVE_L4_CHECKSUM_LSB))

/* Cumulative IP length, extracted from word 18 of the rx_msdu_end TLV. */
#define HAL_RX_TLV_GET_FISA_CUMULATIVE_IP_LENGTH(buf) \
	(_HAL_MS( \
		 (*_OFFSET_TO_WORD_PTR(&(((struct rx_pkt_tlvs *)(buf))->\
			 msdu_end_tlv.rx_msdu_end), \
			 RX_MSDU_END_18_CUMULATIVE_IP_LENGTH_OFFSET)), \
		RX_MSDU_END_18_CUMULATIVE_IP_LENGTH_MASK, \
		RX_MSDU_END_18_CUMULATIVE_IP_LENGTH_LSB))
#endif

View File

@@ -1077,6 +1077,12 @@ struct hal_hw_txrx_ops qca8074_hal_hw_txrx_ops = {
NULL,
/* rx - msdu fast path info fields */
hal_rx_msdu_packet_metadata_get_generic,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
};
struct hal_hw_srng_config hw_srng_table_8074[] = {

View File

@@ -1082,6 +1082,12 @@ struct hal_hw_txrx_ops qca8074v2_hal_hw_txrx_ops = {
#endif
/* rx - msdu fast path info fields */
hal_rx_msdu_packet_metadata_get_generic,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
};
struct hal_hw_srng_config hw_srng_table_8074v2[] = {

View File

@@ -1460,6 +1460,12 @@ struct hal_hw_txrx_ops qcn9000_hal_hw_txrx_ops = {
NULL,
/* rx - msdu fast path info fields */
hal_rx_msdu_packet_metadata_get_9000,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
};
struct hal_hw_srng_config hw_srng_table_9000[] = {

View File

@@ -434,6 +434,7 @@ scm_update_dbs_scan_ctrl_ext_flag(struct scan_start_request *req)
{
struct wlan_objmgr_psoc *psoc;
uint32_t scan_dbs_policy = SCAN_DBS_POLICY_DEFAULT;
bool ndi_present;
psoc = wlan_vdev_get_psoc(req->vdev);
@@ -447,6 +448,15 @@ scm_update_dbs_scan_ctrl_ext_flag(struct scan_start_request *req)
goto end;
}
ndi_present = policy_mgr_mode_specific_connection_count(psoc,
PM_NDI_MODE,
NULL);
if (ndi_present && !policy_mgr_is_hw_dbs_2x2_capable(psoc)) {
scm_debug("NDP present go for DBS scan");
goto end;
}
if (req->scan_req.scan_policy_high_accuracy) {
scm_debug("high accuracy scan received, going for non-dbs scan");
scan_dbs_policy = SCAN_DBS_POLICY_FORCE_NONDBS;
@@ -550,6 +560,8 @@ int scm_scan_get_burst_duration(int max_ch_time, bool miracast_enabled)
return burst_duration;
}
#define SCM_ACTIVE_DWELL_TIME_NAN 40
/**
* scm_req_update_concurrency_params() - update scan req params depending on
* concurrent mode present.
@@ -759,6 +771,17 @@ static void scm_req_update_concurrency_params(struct wlan_objmgr_vdev *vdev,
req->scan_req.dwell_time_active;
}
}
if (ndi_present) {
req->scan_req.dwell_time_active =
QDF_MIN(req->scan_req.dwell_time_active,
SCM_ACTIVE_DWELL_TIME_NAN);
req->scan_req.dwell_time_active_2g =
QDF_MIN(req->scan_req.dwell_time_active_2g,
SCM_ACTIVE_DWELL_TIME_NAN);
scm_debug("NDP active modify dwell time 2ghz %d",
req->scan_req.dwell_time_active_2g);
}
}
/**

View File

@@ -0,0 +1,199 @@
/*
* Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WMI_FILTERED_LOGGING_H
#define WMI_FILTERED_LOGGING_H
#include <qdf_debugfs.h>
#include "wmi_unified_priv.h"
#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
/**
* wmi_specific_cmd_record() - Record user specified command
* @wmi_handle: handle to WMI
* @id: cmd id
* @buf: buf containing cmd details
*
* Check if the command id is in target list,
* if found, record it.
*
* Context: the function will not sleep, caller is expected to hold
* proper locking.
*
* Return: none
*/
void wmi_specific_cmd_record(wmi_unified_t wmi_handle,
uint32_t id, uint8_t *buf);
/**
* wmi_specific_evt_record() - Record user specified event
* @wmi_handle: handle to WMI
* @id: cmd id
* @buf: buf containing event details
*
* Check if the event id is in target list,
* if found, record it.
*
* Context: the function will not sleep, caller is expected to hold
* proper locking.
*
* Return: none
*/
void wmi_specific_evt_record(wmi_unified_t wmi_handle,
uint32_t id, uint8_t *buf);
/**
* wmi_filtered_logging_init() - initialize filtered logging
* @wmi_handle: handle to WMI
*
* Context: the function will not sleep, no lock needed
*
* Return: none
*/
void wmi_filtered_logging_init(wmi_unified_t wmi_handle);
/**
* wmi_filtered_logging_free() - free the buffers for filtered logging
* @wmi_handle: handle to WMI
*
* Context: the function will not sleep, no lock needed
*
* Return: none
*/
void wmi_filtered_logging_free(wmi_unified_t wmi_handle);
/*
* Debugfs read/write functions
*/
/**
* debug_filtered_wmi_cmds_show() - debugfs read function for filtered_wmi_cmds
* @m: seq_file handle
* @v: not used, offset of read
* Return: number of bytes read
*/
int debug_filtered_wmi_cmds_show(qdf_debugfs_file_t m, void *v);
/**
* debug_filtered_wmi_evts_show() - debugfs read function for filtered_wmi_evts
* @m: seq_file handle
* @v: not used, offset of read
* Return: number of bytes read
*/
int debug_filtered_wmi_evts_show(qdf_debugfs_file_t m, void *v);
/**
* debug_wmi_filtered_command_log_show() - debugfs read function for
* wmi_filtered_command_log
* @m: seq_file handle
* @v: not used, offset of read
* Return: number of bytes read
*/
int debug_wmi_filtered_command_log_show(qdf_debugfs_file_t m, void *v);
/**
* debug_wmi_filtered_event_log_show() - debugfs read function for
* wmi_filtered_event_log
* @m: seq_file handle
* @v: not used, offset of read
* Return: number of bytes read
*/
int debug_wmi_filtered_event_log_show(qdf_debugfs_file_t m, void *v);
/**
* debug_wmi_filtered_wmi_cmds_write() - debugfs write for filtered_wmi_cmds
*
* @file: file handler to access wmi_handle
* @buf: received data buffer
* @count: length of received buffer
* @ppos: Not used
*
* Return: count
*/
ssize_t debug_filtered_wmi_cmds_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos);
/**
* debug_wmi_filtered_wmi_evts_write() - debugfs write for filtered_wmi_evts
*
* @file: file handler to access wmi_handle
* @buf: received data buffer
* @count: length of received buffer
* @ppos: Not used
*
* Return: count
*/
ssize_t debug_filtered_wmi_evts_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos);
/**
* debug_wmi_filtered_command_log_write() - debugfs write for
* filtered_command_log
*
* @file: file handler to access wmi_handle
* @buf: received data buffer
* @count: length of received buffer
* @ppos: Not used
*
* Return: count
*/
ssize_t debug_wmi_filtered_command_log_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos);
/**
* debug_wmi_filtered_event_log_write() - debugfs write for filtered_event_log
*
* @file: file handler to access wmi_handle
* @buf: received data buffer
* @count: length of received buffer
* @ppos: Not used
*
* Return: count
*/
ssize_t debug_wmi_filtered_event_log_write(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos);
#else /* WMI_INTERFACE_FILTERED_EVENT_LOGGING */

/* Filtered WMI logging compiled out: all entry points become no-ops so
 * callers need no #ifdef at the call site.
 */
static inline void wmi_specific_cmd_record(wmi_unified_t wmi_handle,
					   uint32_t id, uint8_t *buf)
{
	/* do nothing */
}

static inline void wmi_specific_evt_record(wmi_unified_t wmi_handle,
					   uint32_t id, uint8_t *buf)
{
	/* do nothing */
}

static inline void wmi_filtered_logging_init(wmi_unified_t wmi_handle)
{
	/* do nothing */
}

static inline void wmi_filtered_logging_free(wmi_unified_t wmi_handle)
{
	/* do nothing */
}
#endif /* end of WMI_INTERFACE_FILTERED_EVENT_LOGGING */
#endif /*WMI_FILTERED_LOGGING_H*/

View File

@@ -116,7 +116,9 @@ struct wmi_ext_dbg_msg {
#define WMI_EVENT_DEBUG_MAX_ENTRY (1024)
#endif
#ifndef WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH
#define WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH (16)
#endif
/* wmi_mgmt commands */
#ifndef WMI_MGMT_EVENT_DEBUG_MAX_ENTRY
@@ -127,6 +129,16 @@ struct wmi_ext_dbg_msg {
#define WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY (256)
#endif
#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
#ifndef WMI_FILTERED_CMD_EVT_SUPPORTED
#define WMI_FILTERED_CMD_EVT_SUPPORTED (10)
#endif
#ifndef WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY
#define WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY (1024)
#endif
#endif /* WMI_INTERFACE_FILTERED_EVENT_LOGGING */
#define wmi_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_WMI, ## params)
#define wmi_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_WMI, ## params)
#define wmi_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_WMI, ## params)
@@ -234,6 +246,14 @@ struct wmi_log_buf_t {
* @wmi_id_to_name - Function refernce to API to convert Command id to
* string name
* @wmi_log_debugfs_dir - refernce to debugfs directory
* @filtered_wmi_cmds - Buffer to save inputs from user on
* which WMI commands to record
* @filtered_wmi_cmds_idx - target cmd index
* @filtered_wmi_evts - Buffer to save inputs from user on
* which WMI event to record
* @filtered_wmi_evts_idx - target evt index
* @wmi_filtered_command_log - buffer to record user specified WMI commands
* @wmi_filtered_event_log - buffer to record user specified WMI events
*/
struct wmi_debug_log_info {
struct wmi_log_buf_t wmi_command_log_buf_info;
@@ -250,6 +270,25 @@ struct wmi_debug_log_info {
qdf_spinlock_t wmi_record_lock;
bool wmi_logging_enable;
struct dentry *wmi_log_debugfs_dir;
#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
uint32_t *filtered_wmi_cmds;
uint32_t filtered_wmi_cmds_idx;
uint32_t *filtered_wmi_evts;
uint32_t filtered_wmi_evts_idx;
struct wmi_log_buf_t *wmi_filtered_command_log;
struct wmi_log_buf_t *wmi_filtered_event_log;
#endif
};
/**
* enum WMI_RECORD_TYPE - User specified WMI logging types
* @ WMI_CMD - wmi command id
* @ WMI_EVT - wmi event id
*/
enum WMI_RECORD_TYPE {
WMI_CMD = 1,
WMI_EVT = 2,
};
#endif /*WMI_INTERFACE_EVENT_LOGGING */
@@ -2210,7 +2249,14 @@ struct wmi_host_abi_version {
uint32_t abi_version_ns_3;
};
/* number of debugfs entries used */
#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
/* filtered logging added 4 more entries */
#define NUM_DEBUG_INFOS 13
#else
#define NUM_DEBUG_INFOS 9
#endif
struct wmi_unified {
void *scn_handle; /* handle to device */
osdev_t osdev; /* handle to use OS-independent services */

View File

@@ -0,0 +1,513 @@
/*
* Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#include "wmi_filtered_logging.h"
/*
 * Allocate one filtered-logging ring buffer: the wmi_log_buf_t
 * descriptor plus backing storage for WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY
 * wmi_command_debug records. Returns NULL if either allocation fails;
 * the caller owns (and must eventually free) both allocations.
 */
static struct wmi_log_buf_t *wmi_log_buf_allocate(void)
{
	struct wmi_log_buf_t *log_buf;

	log_buf = qdf_mem_malloc(sizeof(*log_buf));
	if (!log_buf)
		return NULL;

	log_buf->buf = qdf_mem_malloc(WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY *
				      sizeof(struct wmi_command_debug));
	if (!log_buf->buf) {
		qdf_mem_free(log_buf);
		return NULL;
	}

	log_buf->length = 0;
	log_buf->buf_tail_idx = 0;
	log_buf->size = WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY;
	log_buf->p_buf_tail_idx = &log_buf->buf_tail_idx;

	return log_buf;
}
/*
 * wmi_filtered_logging_init() - allocate the four buffers backing
 * filtered WMI logging (cmd id list, cmd record ring, evt id list,
 * evt record ring).
 * @wmi_handle: handle to WMI
 *
 * On any allocation failure everything allocated so far is released
 * and the corresponding log_info pointers are left NULL, which the
 * record/show paths treat as "feature disabled".
 */
void wmi_filtered_logging_init(wmi_unified_t wmi_handle)
{
	int buf_size = WMI_FILTERED_CMD_EVT_SUPPORTED * sizeof(int);

	/* alloc buffer to save user inputs, for WMI_CMD */
	wmi_handle->log_info.filtered_wmi_cmds =
			qdf_mem_malloc(buf_size);
	if (!wmi_handle->log_info.filtered_wmi_cmds)
		return;
	wmi_handle->log_info.filtered_wmi_cmds_idx = 0;

	/* alloc buffer to save user interested WMI commands */
	wmi_handle->log_info.wmi_filtered_command_log = wmi_log_buf_allocate();
	if (!wmi_handle->log_info.wmi_filtered_command_log)
		goto fail1;

	/* alloc buffer to save user inputs, for WMI_EVT */
	wmi_handle->log_info.filtered_wmi_evts =
			qdf_mem_malloc(buf_size);
	if (!wmi_handle->log_info.filtered_wmi_evts)
		goto fail2;
	wmi_handle->log_info.filtered_wmi_evts_idx = 0;

	/* alloc buffer to save user interested WMI events */
	wmi_handle->log_info.wmi_filtered_event_log = wmi_log_buf_allocate();
	if (!wmi_handle->log_info.wmi_filtered_event_log)
		goto fail3;
	return;

fail3:
	qdf_mem_free(wmi_handle->log_info.filtered_wmi_evts);
	wmi_handle->log_info.filtered_wmi_evts = NULL;
fail2:
	/* wmi_log_buf_allocate() also allocated the inner ->buf; free it
	 * first, otherwise it leaks (the original code freed only the
	 * descriptor).
	 */
	qdf_mem_free(wmi_handle->log_info.wmi_filtered_command_log->buf);
	qdf_mem_free(wmi_handle->log_info.wmi_filtered_command_log);
	wmi_handle->log_info.wmi_filtered_command_log = NULL;
fail1:
	qdf_mem_free(wmi_handle->log_info.filtered_wmi_cmds);
	wmi_handle->log_info.filtered_wmi_cmds = NULL;
}
/*
 * wmi_filtered_logging_free() - release every buffer owned by the
 * filtered-logging feature and NULL the pointers so late callers see
 * the feature as disabled.
 * @wmi_handle: handle to WMI; a NULL handle is ignored
 */
void wmi_filtered_logging_free(wmi_unified_t wmi_handle)
{
	struct wmi_debug_log_info *log_info;

	if (!wmi_handle)
		return;

	log_info = &wmi_handle->log_info;

	qdf_mem_free(log_info->filtered_wmi_cmds);
	log_info->filtered_wmi_cmds = NULL;
	qdf_mem_free(log_info->filtered_wmi_evts);
	log_info->filtered_wmi_evts = NULL;

	if (log_info->wmi_filtered_command_log) {
		/* free inner storage before the descriptor */
		qdf_mem_free(log_info->wmi_filtered_command_log->buf);
		log_info->wmi_filtered_command_log->buf = NULL;
		qdf_mem_free(log_info->wmi_filtered_command_log);
		log_info->wmi_filtered_command_log = NULL;
	}

	if (log_info->wmi_filtered_event_log) {
		qdf_mem_free(log_info->wmi_filtered_event_log->buf);
		log_info->wmi_filtered_event_log->buf = NULL;
		qdf_mem_free(log_info->wmi_filtered_event_log);
		log_info->wmi_filtered_event_log = NULL;
	}
}
/*
 * Return a record ring buffer to its pristine state: indices and
 * length cleared, backing storage zeroed. A NULL buffer is silently
 * accepted. @wmi_handle is currently unused but kept for interface
 * parity with the other debugfs helpers.
 */
static int wmi_reset_filtered_buffers(wmi_unified_t wmi_handle,
				      struct wmi_log_buf_t *cmd_log_buf)
{
	if (!cmd_log_buf)
		return 0;

	cmd_log_buf->length = 0;
	cmd_log_buf->buf_tail_idx = 0;
	cmd_log_buf->size = WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY;
	cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
	qdf_mem_zero(cmd_log_buf->buf,
		     WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY *
		     sizeof(struct wmi_command_debug));

	return 0;
}
/*
 * Linear scan of the user-configured id list; returns true when @id is
 * already present. A NULL list never matches.
 */
static bool wmi_id_in_list(uint32_t *id_list, uint32_t id)
{
	int slot;

	if (!id_list)
		return false;

	for (slot = 0; slot < WMI_FILTERED_CMD_EVT_SUPPORTED; slot++)
		if (id_list[slot] == id)
			return true;

	return false;
}
/*
 * Register @id in the list of command or event ids the user wants
 * recorded. The list behaves as a small ring of
 * WMI_FILTERED_CMD_EVT_SUPPORTED slots: once full, the oldest entry
 * is overwritten. Duplicate ids are ignored.
 *
 * Return: 0 on success, -EINVAL for an unknown record_type.
 */
static int wmi_add_to_record_list(wmi_unified_t wmi_handle,
				  uint32_t id,
				  enum WMI_RECORD_TYPE record_type)
{
	uint32_t *target_list;
	uint32_t *slot_idx;

	switch (record_type) {
	case WMI_CMD:
		target_list = wmi_handle->log_info.filtered_wmi_cmds;
		slot_idx = &wmi_handle->log_info.filtered_wmi_cmds_idx;
		break;
	case WMI_EVT:
		target_list = wmi_handle->log_info.filtered_wmi_evts;
		slot_idx = &wmi_handle->log_info.filtered_wmi_evts_idx;
		break;
	default:
		return -EINVAL;
	}

	/* nothing to do when the id was registered earlier */
	if (wmi_id_in_list(target_list, id))
		return 0;

	/* wrap the write position when the list is full */
	if (*slot_idx >= WMI_FILTERED_CMD_EVT_SUPPORTED)
		*slot_idx = 0;
	target_list[*slot_idx] = id;
	(*slot_idx)++;

	return 0;
}
/*
 * Append one record (id, first WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH bytes
 * of @buf, timestamp) to @log_buffer, wrapping the tail index once
 * WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY entries have been written.
 * Callers hold the wmi record lock (see wmi_specific_cmd_record()).
 */
static void wmi_specific_cmd_evt_record(uint32_t id, uint8_t *buf,
					struct wmi_log_buf_t *log_buffer)
{
	int tail;
	struct wmi_command_debug *entry;

	tail = *log_buffer->p_buf_tail_idx;
	if (tail >= WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY)
		tail = 0;

	entry = &((struct wmi_command_debug *)log_buffer->buf)[tail];
	entry->command = id;
	qdf_mem_copy(entry->data, buf,
		     WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH);
	entry->time = qdf_get_log_timestamp();

	*log_buffer->p_buf_tail_idx = tail + 1;
	log_buffer->length++;
}
/*
 * wmi_specific_cmd_record() - record the WMI command @id if the user
 * registered it via debugfs; no-op when the feature buffers are absent.
 * @wmi_handle: handle to WMI
 * @id: command id
 * @buf: command payload to snapshot
 */
void wmi_specific_cmd_record(wmi_unified_t wmi_handle,
			     uint32_t id, uint8_t *buf)
{
	struct wmi_debug_log_info *log_info = &wmi_handle->log_info;

	if (!log_info->filtered_wmi_cmds ||
	    !log_info->wmi_filtered_command_log)
		return;

	/* only ids in the user's filter list are recorded */
	if (wmi_id_in_list(log_info->filtered_wmi_cmds, id))
		wmi_specific_cmd_evt_record(id, buf,
					    log_info->wmi_filtered_command_log);
}
/*
 * wmi_specific_evt_record() - record the WMI event @id if the user
 * registered it via debugfs; no-op when the feature buffers are absent.
 * @wmi_handle: handle to WMI
 * @id: event id
 * @buf: event payload to snapshot
 */
void wmi_specific_evt_record(wmi_unified_t wmi_handle,
			     uint32_t id, uint8_t *buf)
{
	struct wmi_debug_log_info *log_info = &wmi_handle->log_info;

	if (!log_info->filtered_wmi_evts ||
	    !log_info->wmi_filtered_event_log)
		return;

	/* only ids in the user's filter list are recorded */
	if (wmi_id_in_list(log_info->filtered_wmi_evts, id))
		wmi_specific_cmd_evt_record(id, buf,
					    log_info->wmi_filtered_event_log);
}
/*
 * Debugfs read/write functions
 */
/* vprintf-style adapter over seq_vprintf(). Always reports 0 bytes,
 * matching the convention of wmi_bp_seq_printf() elsewhere in this
 * module (callers only accumulate the value for a non-zero "something
 * was printed" signal).
 */
static int wmi_filtered_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
{
	va_list args;

	va_start(args, f);
	seq_vprintf(m, f, args);
	va_end(args);

	return 0;
}
/*
 * debugfs show/read for filtered_wmi_cmds
 */
/**
 * debug_filtered_wmi_cmds_show() - print the WMI command ids the user
 * registered for filtered recording
 * @m: seq_file handle; ->private carries the wmi handle
 * @v: unused read offset
 *
 * Return: 0
 */
int debug_filtered_wmi_cmds_show(qdf_debugfs_file_t m, void *v)
{
	wmi_unified_t wmi_handle = (wmi_unified_t)m->private;
	int i;
	/* fix: list is uint32_t *, the previous int * triggered an
	 * incompatible-pointer-type warning
	 */
	uint32_t *target_list;

	target_list = wmi_handle->log_info.filtered_wmi_cmds;
	if (!target_list)
		return 0;

	for (i = 0; i < WMI_FILTERED_CMD_EVT_SUPPORTED; i++) {
		/* slot value 0 means "unused" */
		if (target_list[i] != 0) {
			wmi_filtered_seq_printf(m, "0x%x ",
						target_list[i]);
		}
	}
	wmi_filtered_seq_printf(m, "\n");

	return 0;
}
/**
 * debug_filtered_wmi_evts_show() - print the WMI event ids the user
 * registered for filtered recording
 * @m: seq_file handle; ->private carries the wmi handle
 * @v: unused read offset
 *
 * Return: 0
 */
int debug_filtered_wmi_evts_show(qdf_debugfs_file_t m, void *v)
{
	wmi_unified_t wmi_handle = (wmi_unified_t)m->private;
	int i;
	/* fix: list is uint32_t *, the previous int * triggered an
	 * incompatible-pointer-type warning
	 */
	uint32_t *target_list;

	target_list = wmi_handle->log_info.filtered_wmi_evts;
	if (!target_list)
		return 0;

	for (i = 0; i < WMI_FILTERED_CMD_EVT_SUPPORTED; i++) {
		/* slot value 0 means "unused" */
		if (target_list[i] != 0) {
			wmi_filtered_seq_printf(m, "0x%x ",
						target_list[i]);
		}
	}
	wmi_filtered_seq_printf(m, "\n");

	return 0;
}
/* Dump up to the last 100 records of @buf (a wmi_log_buf_t ring) to
 * the seq_file, newest first. The ring metadata is sampled under the
 * record lock; the records themselves are read unlocked afterwards.
 *
 * NOTE(review): wmi_ring_size (100) is smaller than the ring's
 * allocated capacity (WMI_FILTERED_CMD_EVT_MAX_NUM_ENTRY), and the
 * wrap-around below resets pos to wmi_ring_size - 1 rather than the
 * ring capacity - 1 — confirm this matches the intended "last 100
 * entries" window. Records are also read as struct wmi_event_debug
 * while the writer fills struct wmi_command_debug — verify the two
 * layouts are interchangeable here.
 */
static int wmi_log_show(wmi_unified_t wmi_handle, void *buf,
			qdf_debugfs_file_t m)
{
	struct wmi_log_buf_t *wmi_log = (struct wmi_log_buf_t *)buf;
	int pos, nread, outlen;
	int i;
	uint64_t secs, usecs;
	int wmi_ring_size = 100;

	/* sample length/tail consistently w.r.t. concurrent recording */
	qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
	if (!wmi_log->length) {
		qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
		return wmi_filtered_seq_printf(m,
					       "Nothing to read!\n");
	}

	/* dump at most wmi_ring_size records */
	if (wmi_log->length <= wmi_ring_size)
		nread = wmi_log->length;
	else
		nread = wmi_ring_size;

	if (*wmi_log->p_buf_tail_idx == 0)
		/* tail can be 0 after wrap-around */
		pos = wmi_ring_size - 1;
	else
		pos = *wmi_log->p_buf_tail_idx - 1;

	outlen = wmi_filtered_seq_printf(m, "Length = %d\n", wmi_log->length);
	qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);

	/* walk backwards from the newest record */
	while (nread--) {
		struct wmi_event_debug *wmi_record;

		wmi_record = &(((struct wmi_event_debug *)wmi_log->buf)[pos]);
		qdf_log_timestamp_to_secs(wmi_record->time, &secs,
					  &usecs);
		outlen += wmi_filtered_seq_printf(m, "Event ID = %x\n",
						  (wmi_record->event));
		outlen +=
			wmi_filtered_seq_printf(m,
						"Event TIME = [%llu.%06llu]\n",
						secs, usecs);
		outlen += wmi_filtered_seq_printf(m, "CMD = ");
		/* payload is printed as 32-bit words */
		for (i = 0; i < (WMI_EVENT_DEBUG_ENTRY_MAX_LENGTH /
				sizeof(uint32_t)); i++)
			outlen += wmi_filtered_seq_printf(m, "%x ",
							  wmi_record->data[i]);
		outlen += wmi_filtered_seq_printf(m, "\n");

		if (pos == 0)
			pos = wmi_ring_size - 1;
		else
			pos--;
	}
	return outlen;
}
/**
 * debug_wmi_filtered_command_log_show() - debugfs read for the filtered
 * WMI command record ring
 * @m: seq_file handle; ->private carries the wmi handle
 * @v: unused read offset
 *
 * Return: bytes reported by wmi_log_show(), or 0 when the ring is absent
 */
int debug_wmi_filtered_command_log_show(qdf_debugfs_file_t m, void *v)
{
	wmi_unified_t wmi_handle = (wmi_unified_t)m->private;
	struct wmi_log_buf_t *log_buf;

	log_buf = wmi_handle->log_info.wmi_filtered_command_log;

	return log_buf ? wmi_log_show(wmi_handle, log_buf, m) : 0;
}
/**
 * debug_wmi_filtered_event_log_show() - debugfs read for the filtered
 * WMI event record ring
 * @m: seq_file handle; ->private carries the wmi handle
 * @v: unused read offset
 *
 * Return: bytes reported by wmi_log_show(), or 0 when the ring is absent
 */
int debug_wmi_filtered_event_log_show(qdf_debugfs_file_t m, void *v)
{
	wmi_unified_t wmi_handle = (wmi_unified_t)m->private;
	struct wmi_log_buf_t *log_buf;

	log_buf = wmi_handle->log_info.wmi_filtered_event_log;

	return log_buf ? wmi_log_show(wmi_handle, log_buf, m) : 0;
}
/**
 * debug_filtered_wmi_cmds_write() - debugfs write for filtered_wmi_cmds
 * @file: file handle; private_data->private carries the wmi handle
 * @buf: user buffer with a hex command id (max 8 chars)
 * @count: length of @buf
 * @ppos: unused
 *
 * Writing 0xffff clears the filter list; any other value is added to it.
 *
 * Return: @count on success, negative errno or 0 on failure
 */
ssize_t debug_filtered_wmi_cmds_write(struct file *file,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	wmi_unified_t wmi_handle =
		((struct seq_file *)file->private_data)->private;
	int k, ret;
	char locbuf[12] = {0};
	int buf_size = WMI_FILTERED_CMD_EVT_SUPPORTED * sizeof(int);

	/* count is unsigned: accept only 1..8 characters */
	if (!buf || count == 0 || count > 8)
		return -EFAULT;

	if (!wmi_handle->log_info.filtered_wmi_cmds)
		return -EFAULT;

	if (copy_from_user(locbuf, buf, count))
		return -EFAULT;

	ret = qdf_kstrtoint(locbuf, 16, &k);
	if (ret)
		return -EINVAL;

	/* magic value 0xffff resets the whole filter list */
	if (k == 0xffff) {
		qdf_mem_zero(wmi_handle->log_info.filtered_wmi_cmds, buf_size);
		wmi_handle->log_info.filtered_wmi_cmds_idx = 0;
		return count;
	}

	if (wmi_add_to_record_list(wmi_handle, k, WMI_CMD)) {
		/* fix: original log used %d with no argument (UB) */
		WMI_LOGE("Add cmd 0x%x to WMI_CMD list failed", k);
		return 0;
	}

	return count;
}
/**
 * debug_filtered_wmi_evts_write() - debugfs write for filtered_wmi_evts
 * @file: file handle; private_data->private carries the wmi handle
 * @buf: user buffer with a hex event id (max 8 chars)
 * @count: length of @buf
 * @ppos: unused
 *
 * Writing 0xffff clears the filter list; any other value is added to it.
 *
 * Return: @count on success, negative errno or 0 on failure
 */
ssize_t debug_filtered_wmi_evts_write(struct file *file,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	wmi_unified_t wmi_handle =
		((struct seq_file *)file->private_data)->private;
	int k, ret;
	char locbuf[12] = {0};
	int buf_size = WMI_FILTERED_CMD_EVT_SUPPORTED * sizeof(int);

	/* count is unsigned: accept only 1..8 characters */
	if (!buf || count == 0 || count > 8)
		return -EFAULT;

	if (!wmi_handle->log_info.filtered_wmi_evts)
		return -EFAULT;

	if (copy_from_user(locbuf, buf, count))
		return -EFAULT;

	ret = qdf_kstrtoint(locbuf, 16, &k);
	if (ret)
		return -EINVAL;

	/* magic value 0xffff resets the whole filter list */
	if (k == 0xffff) {
		qdf_mem_zero(wmi_handle->log_info.filtered_wmi_evts, buf_size);
		wmi_handle->log_info.filtered_wmi_evts_idx = 0;
		return count;
	}

	if (wmi_add_to_record_list(wmi_handle, k, WMI_EVT)) {
		/* fix: original log used %d with no argument (UB) */
		WMI_LOGE("Add evt 0x%x to WMI_EVT list failed", k);
		return 0;
	}

	return count;
}
/**
 * debug_wmi_filtered_command_log_write() - debugfs write for
 * wmi_filtered_command_log; only the reset magic 0xffff is accepted
 * and clears the record ring.
 * @file: file handle; private_data->private carries the wmi handle
 * @buf: user buffer with a hex value (max 8 chars)
 * @count: length of @buf
 * @ppos: unused
 *
 * Return: @count on success, negative errno on failure
 */
ssize_t debug_wmi_filtered_command_log_write(struct file *file,
					     const char __user *buf,
					     size_t count, loff_t *ppos)
{
	wmi_unified_t wmi_handle =
		((struct seq_file *)file->private_data)->private;
	char kbuf[12] = {0};
	int value, status;

	if ((!buf) || (count > 8 || count <= 0))
		return -EFAULT;

	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	status = qdf_kstrtoint(kbuf, 16, &value);
	if (status)
		return -EINVAL;

	if (value != 0xffff)
		return -EINVAL;

	if (wmi_reset_filtered_buffers(wmi_handle,
				       wmi_handle->log_info.
				       wmi_filtered_command_log))
		WMI_LOGE("reset WMI CMD filtered_buffers failed");

	return count;
}
/**
 * debug_wmi_filtered_event_log_write() - debugfs write for
 * wmi_filtered_event_log; only the reset magic 0xffff is accepted
 * and clears the record ring.
 * @file: file handle; private_data->private carries the wmi handle
 * @buf: user buffer with a hex value (max 8 chars)
 * @count: length of @buf
 * @ppos: unused
 *
 * Return: @count on success, negative errno on failure
 */
ssize_t debug_wmi_filtered_event_log_write(struct file *file,
					   const char __user *buf,
					   size_t count, loff_t *ppos)
{
	wmi_unified_t wmi_handle =
		((struct seq_file *)file->private_data)->private;
	char kbuf[12] = {0};
	int value, status;

	if ((!buf) || (count > 8 || count <= 0))
		return -EFAULT;

	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	status = qdf_kstrtoint(kbuf, 16, &value);
	if (status)
		return -EINVAL;

	if (value != 0xffff)
		return -EINVAL;

	if (wmi_reset_filtered_buffers(wmi_handle,
				       wmi_handle->log_info.
				       wmi_filtered_event_log))
		WMI_LOGE("reset WMI EVT filtered_buffers failed");

	return count;
}

View File

@@ -36,6 +36,8 @@
#include <linux/debugfs.h>
#include <target_if.h>
#include <qdf_debugfs.h>
#include "wmi_filtered_logging.h"
/* This check for CONFIG_WIN temporary added due to redeclaration compilation
error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h
@@ -101,7 +103,7 @@ typedef PREPACK struct {
#ifdef WMI_INTERFACE_EVENT_LOGGING
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
/* TODO Cleanup this backported function */
static int wmi_bp_seq_printf(struct seq_file *m, const char *f, ...)
static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
{
va_list args;
@@ -813,6 +815,8 @@ static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
wmi_handle->log_info.wmi_logging_enable = 1;
wmi_filtered_logging_init(wmi_handle);
return QDF_STATUS_SUCCESS;
}
#endif
@@ -827,6 +831,8 @@ static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
#ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
{
wmi_filtered_logging_free(wmi_handle);
if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
@@ -850,6 +856,7 @@ static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
qdf_mem_free(
wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
wmi_handle->log_info.wmi_logging_enable = 0;
qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
}
#else
@@ -1357,6 +1364,12 @@ GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
GENERATE_DEBUG_STRUCTS(wmi_enable);
GENERATE_DEBUG_STRUCTS(wmi_log_size);
#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
#endif
struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
DEBUG_FOO(wmi_command_log),
@@ -1368,6 +1381,12 @@ struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
DEBUG_FOO(wmi_mgmt_event_log),
DEBUG_FOO(wmi_enable),
DEBUG_FOO(wmi_log_size),
#ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
DEBUG_FOO(filtered_wmi_cmds),
DEBUG_FOO(filtered_wmi_evts),
DEBUG_FOO(wmi_filtered_command_log),
DEBUG_FOO(wmi_filtered_event_log),
#endif
};
@@ -1484,7 +1503,7 @@ void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
}
#else
@@ -1839,9 +1858,11 @@ QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
* WMI mgmt command already recorded in wmi_mgmt_cmd_record
*/
if (wmi_handle->ops->is_management_record(cmd_id) == false) {
WMI_COMMAND_RECORD(wmi_handle, cmd_id,
qdf_nbuf_data(buf) +
wmi_handle->soc->buf_offset_command);
uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
wmi_handle->soc->buf_offset_command;
WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
}
qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
}
@@ -2430,8 +2451,11 @@ void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
* as its already logged in WMI RX event buffer
*/
} else {
WMI_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
wmi_handle->soc->buf_offset_event));
uint8_t *tmpbuf = (uint8_t *)data +
wmi_handle->soc->buf_offset_event;
WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
wmi_specific_evt_record(wmi_handle, id, tmpbuf);
}
qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
}