qcacld-3.0: dp: change legacy data path api to cdp api

Remove legacy API calls from outside the data path,
replace legacy APIs with CDP APIs, and introduce CDP
API wrappers in the dp module.

Change-Id: I345abb70b6ddd7f5768cea2d933e0023c5742b4a
CRs-fixed: 1075736
Author:    Leo Chang
Date:      2016-10-28 11:07:18 -07:00
Committer: qcabuildsw
Parent:    9b09703f79
Commit:    9872676b91

16 changed files with 782 additions and 335 deletions
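The pattern throughout: a caller outside the data path stops invoking the ol_txrx_* function directly and instead calls the matching cdp_* wrapper, passing the soc handle obtained from cds_get_context(QDF_MODULE_ID_SOC) as the first argument. A minimal before/after sketch of the call-site change, taken from the IPA op-response hunk below:

	/* before: direct call into the legacy ol data path */
	ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_msg_buffer);

	/* after: dispatch through the cdp wrapper, keyed by the soc handle */
	cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
			    pdev->txrx_pdev, op_msg_buffer);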


@@ -132,6 +132,13 @@ struct htt_host_rx_desc_base {
 	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.map_index)
 #define HTT_RX_RING_BUFF_DBG_LIST 1024
+#ifdef MSM_PLATFORM
+#define HTT_ADDRESS_MASK 0xfffffffffffffffe
+#else
+#define HTT_ADDRESS_MASK 0xfffffffe
+#endif /* MSM_PLATFORM */
 struct rx_buf_debug {
 	qdf_dma_addr_t paddr;
 	qdf_nbuf_t nbuf;


@@ -448,8 +448,8 @@ void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 				msg_start_ptr,
 				sizeof(struct htt_wdi_ipa_op_response_t) +
 				len);
-		ol_txrx_ipa_uc_op_response(pdev->txrx_pdev,
-					   op_msg_buffer);
+		cdp_ipa_op_response(cds_get_context(QDF_MODULE_ID_SOC),
+				    pdev->txrx_pdev, op_msg_buffer);
 		break;
 	}
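For context, the cdp_* wrappers this commit introduces are thin dispatchers, not new implementations: each one looks up the data-path ops registered on the soc object and forwards the arguments to the legacy handler. A rough sketch of that shape, assuming illustrative names (the soc/ops/ipa_ops member layout below is not taken from this commit):

	/* sketch only: cdp_soc_t/ops/ipa_ops/ipa_op_response names assumed */
	static inline void
	cdp_ipa_op_response(struct cdp_soc_t *soc, void *pdev, uint8_t *op_msg)
	{
		/* forward to whatever legacy implementation was registered */
		if (soc && soc->ops && soc->ops->ipa_ops &&
		    soc->ops->ipa_ops->ipa_op_response)
			soc->ops->ipa_ops->ipa_op_response(pdev, op_msg);
	}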


@@ -516,7 +516,7 @@ unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev);
  * @param pdev - handle to the physical device
  */
 unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev);
-void ol_cfg_set_ipa_uc_tx_partition_base(ol_pdev_handle pdev, uint32_t value);
+void ol_cfg_set_ipa_uc_tx_partition_base(void *pdev, uint32_t value);
 #else
 static inline unsigned int ol_cfg_ipa_uc_offload_enabled(
 	ol_pdev_handle pdev)
@@ -549,7 +549,7 @@ static inline unsigned int ol_cfg_ipa_uc_tx_partition_base(
 }
 static inline void ol_cfg_set_ipa_uc_tx_partition_base(
-	ol_pdev_handle pdev, uint32_t value)
+	void *pdev, uint32_t value)
 {
 	return;
 }
@@ -564,8 +564,9 @@ static inline void ol_cfg_set_ipa_uc_tx_partition_base(
  *
  * Return: None
  */
-static inline void ol_set_cfg_flow_steering(ol_pdev_handle pdev, uint8_t val)
+static inline void ol_set_cfg_flow_steering(void *ppdev, uint8_t val)
 {
+	ol_pdev_handle pdev = ppdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 	cfg->flow_steering_enabled = val;


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -38,6 +38,34 @@ struct wmi_unified;
 typedef struct wmi_unified *wmi_unified_t;
 typedef void *ol_scn_t;
+
+/**
+ * ol_txrx_pdev_handle - opaque handle for txrx physical device
+ * object
+ */
+struct ol_txrx_pdev_t;
+typedef struct ol_txrx_pdev_t *ol_txrx_pdev_handle;
+
+/**
+ * ol_txrx_vdev_handle - opaque handle for txrx virtual device
+ * object
+ */
+struct ol_txrx_vdev_t;
+typedef struct ol_txrx_vdev_t *ol_txrx_vdev_handle;
+
+/**
+ * ol_pdev_handle - opaque handle for the configuration
+ * associated with the physical device
+ */
+struct ol_pdev_t;
+typedef struct ol_pdev_t *ol_pdev_handle;
+
+/**
+ * ol_txrx_peer_handle - opaque handle for txrx peer object
+ */
+struct ol_txrx_peer_t;
+typedef struct ol_txrx_peer_t *ol_txrx_peer_handle;
+
 /**
  * @wmi_event_handler function prototype
  */


@@ -41,6 +41,7 @@
 #include <htc_api.h>            /* HTC_HANDLE */
 #include "htt.h"                /* htt_dbg_stats_type, etc. */
 #include <cdp_txrx_cmn.h>       /* ol_pdev_handle */
+#include <ol_defines.h>
 /* TID */
 #define OL_HTT_TID_NON_QOS_UNICAST 16


@@ -41,40 +41,10 @@
 #include <wlan_defs.h>          /* MAX_SPATIAL_STREAM */
 #include <cdp_txrx_cmn.h>       /* ol_pdev_handle, ol_vdev_handle, etc */
 #include <cdp_txrx_cfg.h>
+#include <ol_defines.h>
 #define OL_ATH_TX_DRAIN_WAIT_DELAY 50
-/* Maximum number of station supported by data path, including BC. */
-#define WLAN_MAX_STA_COUNT (HAL_NUM_STA)
-/* The symbolic station ID return to HDD to specify the packet is bc/mc */
-#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
-/* The symbolic station ID return to HDD to specify the packet is
-   to soft-AP itself */
-#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
-#define OL_TXQ_PAUSE_REASON_FW (1 << 0)
-#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
-#define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2)
-#define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3)
-#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
-/* command options for dumpStats*/
-#define WLAN_HDD_STATS 0
-#define WLAN_TXRX_STATS 1
-#define WLAN_TXRX_HIST_STATS 2
-#define WLAN_TXRX_TSO_STATS 3
-#define WLAN_HDD_NETIF_OPER_HISTORY 4
-#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
-#define WLAN_TXRX_DESC_STATS 6
-#define WLAN_HIF_STATS 7
-#define WLAN_LRO_STATS 8
-#define WLAN_SCHEDULER_STATS 21
-#define WLAN_TX_QUEUE_STATS 22
-#define WLAN_BUNDLE_STATS 23
-#define WLAN_CREDIT_STATS 24
 /**
  * @brief Set up the data SW subsystem.
  * @details
@@ -97,7 +67,7 @@
  * @return 0 for success or error code
  */
 int
-ol_txrx_pdev_post_attach(ol_txrx_pdev_handle pdev);
+ol_txrx_pdev_post_attach(void *pdev);
 /**
  * @brief Parameter type to be input to ol_txrx_peer_update
@@ -138,43 +108,7 @@ ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
 		    union ol_txrx_peer_update_param_t *param,
 		    enum ol_txrx_peer_update_select_t select);
-enum {
-	OL_TX_WMM_AC_BE,
-	OL_TX_WMM_AC_BK,
-	OL_TX_WMM_AC_VI,
-	OL_TX_WMM_AC_VO,
-	OL_TX_NUM_WMM_AC
-};
-/**
- * @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param
- * @details
- * The struct is used to specify informaiton to update TX WMM scheduler.
- */
-struct ol_tx_ac_param_t {
-	uint32_t aifs;
-	uint32_t cwmin;
-	uint32_t cwmax;
-};
-struct ol_tx_wmm_param_t {
-	struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
-};
 #if defined(CONFIG_HL_SUPPORT)
-/**
- * @brief Set paramters of WMM scheduler per AC settings. .
- * @details
- * This function applies only to HL systems.
- *
- * @param data_pdev - the physical device being paused
- * @param wmm_param - the wmm parameters
- */
-void
-ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
-		      struct ol_tx_wmm_param_t wmm_param);
 /**
  * @brief notify tx data SW that a peer-TID is ready to transmit to.
  * @details
@@ -251,14 +185,6 @@ void
 ol_txrx_throttle_unpause(ol_txrx_pdev_handle data_pdev);
 #else
-static inline
-void ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
-			   struct ol_tx_wmm_param_t wmm_param)
-{
-	return;
-}
 static inline void
 ol_txrx_peer_tid_unpause(ol_txrx_peer_handle data_peer, int tid)
 {
@@ -382,13 +308,9 @@ typedef void
  * @param ctxt - the context argument provided to the callback function
  */
 void
-ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle data_vdev,
+ol_txrx_data_tx_cb_set(void *data_vdev,
 		       ol_txrx_data_tx_cb callback, void *ctxt);
-#ifdef FEATURE_RUNTIME_PM
-QDF_STATUS ol_txrx_runtime_suspend(ol_txrx_pdev_handle txrx_pdev);
-QDF_STATUS ol_txrx_runtime_resume(ol_txrx_pdev_handle txrx_pdev);
-#endif
 QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout);
@@ -503,12 +425,10 @@ ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
 #define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR /* failure */
 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
-QDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id);
+QDF_STATUS ol_txrx_get_vdevid(void *peer, uint8_t *vdev_id);
 void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id);
-#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
 #define OL_TXRX_RSSI_INVALID 0xffff
 /**
  * @brief Provide the current RSSI average from data frames sent by a peer.
@@ -536,17 +456,6 @@ int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
 #define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
 #endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
-/*
- * Bins used for reporting delay histogram:
- * bin 0: 0 - 10 ms delay
- * bin 1: 10 - 20 ms delay
- * bin 2: 20 - 40 ms delay
- * bin 3: 40 - 80 ms delay
- * bin 4: 80 - 160 ms delay
- * bin 5: > 160 ms delay
- */
-#define QCA_TX_DELAY_HIST_REPORT_BINS 6
 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
 /**
@@ -557,7 +466,7 @@ int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
  */
 void
 ol_txrx_bad_peer_txctl_set_setting(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int enable,
 	int period,
 	int txq_limit);
@@ -570,7 +479,7 @@ ol_txrx_bad_peer_txctl_set_setting(
  */
 void
 ol_txrx_bad_peer_txctl_update_threshold(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int level,
 	int tput_thresh,
 	int tx_limit);
@@ -579,7 +488,7 @@ ol_txrx_bad_peer_txctl_update_threshold(
 static inline void
 ol_txrx_bad_peer_txctl_set_setting(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int enable,
 	int period,
 	int txq_limit)
@@ -589,7 +498,7 @@ ol_txrx_bad_peer_txctl_set_setting(
 static inline void
 ol_txrx_bad_peer_txctl_update_threshold(
-	struct ol_txrx_pdev_t *pdev,
+	void *pdev,
 	int level,
 	int tput_thresh,
 	int tx_limit)


@@ -37,6 +37,7 @@
 #include <qdf_nbuf.h>           /* qdf_nbuf_t */
 #include <cdp_txrx_cmn.h>       /* ol_txrx_pdev_handle */
+#include <ol_defines.h>
 static inline uint16_t *ol_tx_msdu_id_storage(qdf_nbuf_t msdu)
 {


@@ -82,7 +82,7 @@ qdf_nbuf_t ol_txrx_osif_tso_segment(ol_txrx_vdev_handle txrx_vdev,
 				   int max_seg_payload_bytes,
 				   qdf_nbuf_t jumbo_tcp_frame);
-qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle data_vdev, qdf_nbuf_t skb);
+qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb);
 void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 			qdf_nbuf_t rx_buf_list);


@@ -39,19 +39,18 @@ unsigned int vow_config = 0;
  *
  * Return: none
  */
-static
-void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
-				       struct txrx_pdev_cfg_param_t cfg_param)
+void ol_tx_set_flow_control_parameters(void *pcfg_ctx,
+				       struct txrx_pdev_cfg_param_t *cfg_param)
 {
+	struct txrx_pdev_cfg_t *cfg_ctx = pcfg_ctx;
 	cfg_ctx->tx_flow_start_queue_offset =
-		cfg_param.tx_flow_start_queue_offset;
+		cfg_param->tx_flow_start_queue_offset;
 	cfg_ctx->tx_flow_stop_queue_th =
-		cfg_param.tx_flow_stop_queue_th;
+		cfg_param->tx_flow_stop_queue_th;
 }
 #else
-static
-void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
-				       struct txrx_pdev_cfg_param_t cfg_param)
+void ol_tx_set_flow_control_parameters(void *pcfg_ctx,
+				       struct txrx_pdev_cfg_param_t *cfg_param)
 {
 	return;
 }
@@ -117,9 +116,9 @@ uint8_t ol_defrag_timeout_check(void)
  * Return: the control device object
  */
-ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
-				  struct txrx_pdev_cfg_param_t cfg_param)
+void *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 {
+	struct txrx_pdev_cfg_param_t *cfg_param = pcfg_param;
 	struct txrx_pdev_cfg_t *cfg_ctx;
 	cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
@@ -148,21 +147,21 @@ ol_pdev_handle ol_pdev_cfg_attach(qdf_device_t osdev,
 	cfg_ctx->dutycycle_level[3] = THROTTLE_DUTY_CYCLE_LEVEL3;
 	cfg_ctx->rx_fwd_disabled = 0;
 	cfg_ctx->is_packet_log_enabled = 0;
-	cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
+	cfg_ctx->is_full_reorder_offload = cfg_param->is_full_reorder_offload;
 	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
-		cfg_param.is_uc_offload_enabled;
-	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
-	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
+		cfg_param->is_uc_offload_enabled;
+	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param->uc_tx_buffer_count;
+	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param->uc_tx_buffer_size;
 	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
-		cfg_param.uc_rx_indication_ring_count;
-	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
-	cfg_ctx->enable_rxthread = cfg_param.enable_rxthread;
+		cfg_param->uc_rx_indication_ring_count;
+	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param->uc_tx_partition_base;
+	cfg_ctx->enable_rxthread = cfg_param->enable_rxthread;
 	cfg_ctx->ip_tcp_udp_checksum_offload =
-		cfg_param.ip_tcp_udp_checksum_offload;
-	cfg_ctx->ce_classify_enabled = cfg_param.ce_classify_enabled;
+		cfg_param->ip_tcp_udp_checksum_offload;
+	cfg_ctx->ce_classify_enabled = cfg_param->ce_classify_enabled;
 	ol_tx_set_flow_control_parameters(cfg_ctx, cfg_param);
-	return (ol_pdev_handle) cfg_ctx;
+	return (void *)cfg_ctx;
 }
 int ol_cfg_is_high_latency(ol_pdev_handle pdev)
@@ -210,8 +209,9 @@ int ol_cfg_rx_fwd_check(ol_pdev_handle pdev)
  * Currently only intra-bss fwd is supported.
  *
  */
-void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
+void ol_set_cfg_rx_fwd_disabled(void *ppdev, uint8_t disable_rx_fwd)
 {
+	ol_pdev_handle pdev = ppdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 	cfg->rx_fwd_disabled = disable_rx_fwd;
 }
@@ -223,8 +223,9 @@ void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
  * @pdev - handle to the physical device
  * @val - 0 - disable, 1 - enable
  */
-void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val)
+void ol_set_cfg_packet_log_enabled(void *ppdev, uint8_t val)
 {
+	ol_pdev_handle pdev = ppdev;
 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
 	cfg->is_packet_log_enabled = val;
 }
@@ -404,9 +405,9 @@ unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
 	return cfg->ipa_uc_rsc.tx_partition_base;
 }
-void ol_cfg_set_ipa_uc_tx_partition_base(ol_pdev_handle pdev, uint32_t val)
+void ol_cfg_set_ipa_uc_tx_partition_base(void *pdev, uint32_t val)
 {
-	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+	struct txrx_pdev_cfg_t *cfg = pdev;
 	cfg->ipa_uc_rsc.tx_partition_base = val;
 }
 #endif /* IPA_OFFLOAD */


@@ -142,10 +142,11 @@ static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
  *
  * Return: skb/NULL for success
  */
-qdf_nbuf_t ol_tx_data(ol_txrx_vdev_handle vdev, qdf_nbuf_t skb)
+qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb)
 {
 	struct ol_txrx_pdev_t *pdev;
 	qdf_nbuf_t ret;
+	ol_txrx_vdev_handle vdev = data_vdev;
 	if (qdf_unlikely(!vdev)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
@@ -1650,9 +1651,11 @@ ol_tx_non_std_hl(ol_txrx_vdev_handle vdev,
  * Return: null - success, skb - failure
  */
 qdf_nbuf_t
-ol_tx_non_std(ol_txrx_vdev_handle vdev,
+ol_tx_non_std(void *pvdev,
 	      enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	if (vdev->pdev->cfg.is_high_latency)
 		return ol_tx_non_std_hl(vdev, tx_spec, msdu_list);
 	else
@@ -1660,9 +1663,10 @@ ol_tx_non_std(ol_txrx_vdev_handle vdev,
 }
 void
-ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
+ol_txrx_data_tx_cb_set(void *pvdev,
 		       ol_txrx_data_tx_cb callback, void *ctxt)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	pdev->tx_data_callback.func = callback;
 	pdev->tx_data_callback.ctxt = ctxt;
@@ -1689,11 +1693,12 @@ ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
  * for a given type of management frame.
  */
 void
-ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
+ol_txrx_mgmt_tx_cb_set(void *ppdev,
 		       uint8_t type,
 		       ol_txrx_mgmt_tx_cb download_cb,
 		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
 	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
 	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;


@@ -577,8 +577,9 @@ ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev)
 }
 void
-ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_txrx_peer_t *peer;
 	/* TO DO: log the queue pause */
@@ -599,8 +600,9 @@ ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
 }
-void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_txrx_peer_t *peer;
 	/* TO DO: log the queue unpause */
@@ -624,8 +626,10 @@ void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
 	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
 }
-void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
+void ol_txrx_vdev_flush(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	ol_tx_queue_vdev_flush(vdev->pdev, vdev);
 }
@@ -814,9 +818,10 @@ ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
 }
 void
-ol_txrx_bad_peer_txctl_set_setting(struct ol_txrx_pdev_t *pdev,
+ol_txrx_bad_peer_txctl_set_setting(void *ppdev,
 				   int enable, int period, int txq_limit)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	if (enable)
 		pdev->tx_peer_bal.enabled = ol_tx_peer_bal_enable;
 	else
@@ -828,10 +833,12 @@ ol_txrx_bad_peer_txctl_set_setting(struct ol_txrx_pdev_t *pdev,
 }
 void
-ol_txrx_bad_peer_txctl_update_threshold(struct ol_txrx_pdev_t *pdev,
+ol_txrx_bad_peer_txctl_update_threshold(void *ppdev,
 					int level, int tput_thresh,
 					int tx_limit)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	/* Set the current settingl */
 	pdev->tx_peer_bal.ctl_thresh[level].tput_thresh =
 		tput_thresh;
@@ -1685,8 +1692,10 @@ ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
  * will be paused.
  *
  */
-void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	/* TO DO: log the queue pause */
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -1710,8 +1719,9 @@ void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
  * LL systems that use per-vdev tx queues for MCC or thermal throttling.
  *
 */
-void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	/* TO DO: log the queue unpause */
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -1746,8 +1756,9 @@ void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
  * stale, and would need to be discarded.
  *
 */
-void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
+void ol_txrx_vdev_flush(void *pvdev)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	qdf_timer_stop(&vdev->ll_pause.timer);
 	vdev->ll_pause.is_q_timer_on = false;
@@ -1769,7 +1780,7 @@ void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
 #endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
 #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
-void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev)
+void ol_txrx_vdev_flush(void *data_vdev)
 {
 	return;
 }
@@ -1813,8 +1824,9 @@ ol_txrx_map_to_netif_reason_type(uint32_t reason)
  *
  * Return: none
 */
-void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_pause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	enum netif_reason_type netif_reason;
@@ -1838,8 +1850,9 @@ void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
  *
  * Return: none
 */
-void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+void ol_txrx_vdev_unpause(void *pvdev, uint32_t reason)
 {
+	ol_txrx_vdev_handle vdev = pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	enum netif_reason_type netif_reason;
@@ -1874,7 +1887,8 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-		ol_txrx_vdev_pause(vdev, reason);
+		cdp_fc_vdev_pause(
+			cds_get_context(QDF_MODULE_ID_SOC), vdev, reason);
 	}
 }
@@ -1891,7 +1905,8 @@ void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 	TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-		ol_txrx_vdev_unpause(vdev, reason);
+		cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
+				    vdev, reason);
 	}
 }
@@ -2062,8 +2077,9 @@ ol_tx_set_throttle_phase_time(struct ol_txrx_pdev_t *pdev, int level, int *ms)
 }
 #endif
-void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
+void ol_tx_throttle_set_level(void *ppdev, int level)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	int ms = 0;
 	if (level >= THROTTLE_LEVEL_MAX) {
@@ -2084,9 +2100,10 @@ void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
 	qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
 }
-void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period,
+void ol_tx_throttle_init_period(void *ppdev, int period,
 				uint8_t *dutycycle_level)
 {
+	struct ol_txrx_pdev_t *pdev = ppdev;
 	int i;
 	/* Set the current throttle level */


@@ -438,7 +438,7 @@ ol_tx_sched_init_rr(
 }
 void
-ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ol_txrx_set_wmm_param(void *data_pdev,
 		      struct ol_tx_wmm_param_t wmm_param)
 {
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
@@ -1057,9 +1057,10 @@ ol_tx_sched_init_wrr_adv(
  * settings of the scheduler, ie. VO, VI, BE, or BK.
 */
 void
-ol_txrx_set_wmm_param(ol_txrx_pdev_handle data_pdev,
+ol_txrx_set_wmm_param(void *pdata_pdev,
 		      struct ol_tx_wmm_param_t wmm_param)
 {
+	ol_txrx_pdev_handle data_pdev = pdata_pdev;
 	struct ol_tx_sched_wrr_adv_t def_cfg;
 	struct ol_tx_sched_wrr_adv_t *scheduler =
 		data_pdev->tx_sched.scheduler;


@@ -116,6 +116,23 @@ ol_tx_target_credit_incr_int(struct ol_txrx_pdev_t *pdev, int delta)
 #endif
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+/**
+ * ol_txrx_flow_control_cb() - call osif flow control callback
+ * @vdev: vdev handle
+ * @tx_resume: tx resume flag
+ *
+ * Return: none
+ */
+void ol_txrx_flow_control_cb(void *pvdev, bool tx_resume)
+{
+	struct ol_txrx_vdev_t *vdev = pvdev;
+
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
+	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
+		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
+	return;
+}
 /**
  * ol_tx_flow_ct_unpause_os_q() - Unpause OS Q
@@ -867,8 +884,9 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
  * @details
  * @param interval - interval for stats computation
 */
-void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
+void ol_tx_set_compute_interval(void *ppdev, uint32_t interval)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(interval);
 }
@@ -886,10 +904,11 @@ void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
  * @param out_packet_loss_count - number of packets lost
 */
 void
-ol_tx_packet_count(ol_txrx_pdev_handle pdev,
+ol_tx_packet_count(void *ppdev,
 		   uint16_t *out_packet_count,
 		   uint16_t *out_packet_loss_count, int category)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	*out_packet_count = pdev->packet_count[category];
 	*out_packet_loss_count = pdev->packet_loss_count[category];
 	pdev->packet_count[category] = 0;
@@ -914,10 +933,11 @@ uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
 }
 void
-ol_tx_delay(ol_txrx_pdev_handle pdev,
+ol_tx_delay(void *ppdev,
 	    uint32_t *queue_delay_microsec,
 	    uint32_t *tx_delay_microsec, int category)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	int index;
 	uint32_t avg_delay_ticks;
 	struct ol_tx_delay_data *data;
@@ -960,9 +980,10 @@ ol_tx_delay(ol_txrx_pdev_handle pdev,
 }
 void
-ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
+ol_tx_delay_hist(void *ppdev,
 		 uint16_t *report_bin_values, int category)
 {
+	ol_txrx_pdev_handle pdev = ppdev;
 	int index, i, j;
 	struct ol_tx_delay_data *data;

File diff suppressed because it is too large.


@@ -68,11 +68,11 @@ ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
 #if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
 void
-ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag);
+ol_txrx_hl_tdls_flag_reset(void *vdev, bool flag);
 #else
 static inline void
-ol_txrx_hl_tdls_flag_reset(struct ol_txrx_vdev_t *vdev, bool flag)
+ol_txrx_hl_tdls_flag_reset(void *vdev, bool flag)
 {
 	return;
 }
@@ -84,43 +84,37 @@ void
 ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr);
 void
-ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
-			   ol_txrx_vdev_handle vdev,
+ol_txrx_add_last_real_peer(void *pdev, void *vdev,
 			   uint8_t *peer_id);
 bool
 is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer);
 void
-ol_txrx_update_last_real_peer(
-	ol_txrx_pdev_handle pdev,
-	struct ol_txrx_peer_t *peer,
+ol_txrx_update_last_real_peer(void *ppdev, void *ppeer,
 	uint8_t *peer_id, bool restore_last_peer);
 #else
 static inline void
-ol_txrx_copy_mac_addr_raw(ol_txrx_vdev_handle vdev, uint8_t *bss_addr)
+ol_txrx_copy_mac_addr_raw(void *vdev, uint8_t *bss_addr)
 {
 	return;
 }
 static inline void
-ol_txrx_add_last_real_peer(ol_txrx_pdev_handle pdev,
-			   ol_txrx_vdev_handle vdev, uint8_t *peer_id)
+ol_txrx_add_last_real_peer(void *pdev, void *vdev, uint8_t *peer_id)
 {
 	return;
 }
 static inline bool
-is_vdev_restore_last_peer(struct ol_txrx_peer_t *peer)
+is_vdev_restore_last_peer(void *peer)
 {
 	return false;
 }
 static inline void
-ol_txrx_update_last_real_peer(
-	ol_txrx_pdev_handle pdev,
-	struct ol_txrx_peer_t *peer,
+ol_txrx_update_last_real_peer(void *ppdev, void *ppeer,
 	uint8_t *peer_id, bool restore_last_peer)
 {
@@ -128,11 +122,10 @@ ol_txrx_update_last_real_peer(
 }
 #endif
-ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
-void htt_pkt_log_init(struct ol_txrx_pdev_t *handle, void *scn);
-QDF_STATUS ol_txrx_set_wisa_mode(ol_txrx_vdev_handle vdev,
+void *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
+void htt_pkt_log_init(void *handle, void *scn);
+QDF_STATUS ol_txrx_set_wisa_mode(void *vdev,
 				 bool enable);
 void ol_txrx_update_mac_id(uint8_t vdev_id, uint8_t mac_id);
+void ol_txrx_peer_detach_force_delete(ol_txrx_peer_handle peer);
 #endif /* _OL_TXRX__H_ */


@@ -60,10 +60,6 @@
 */
 #define MAX_NUM_PEER_ID_PER_PEER 16
-#define OL_TXRX_INVALID_NUM_PEERS (-1)
-#define OL_TXRX_MAC_ADDR_LEN 6
 /* OL_TXRX_NUM_EXT_TIDS -
  * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
 */
@@ -351,15 +347,6 @@ struct ol_mac_addr {
 struct ol_tx_sched_t;
-#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
-/*
- * Each AP will occupy one ID, so it will occupy two IDs for AP-AP mode.
- * And the remainder IDs will be assigned to other 32 clients.
- */
-#define OL_TXRX_NUM_LOCAL_PEER_IDS (2 + 32)
-#endif
 #ifndef ol_txrx_local_peer_id_t
 #define ol_txrx_local_peer_id_t uint8_t /* default */
 #endif
@@ -385,16 +372,6 @@ struct ol_tx_delay_data {
 #endif /* QCA_COMPUTE_TX_DELAY */
 /* Thermal Mitigation */
-enum throttle_level {
-	THROTTLE_LEVEL_0,
-	THROTTLE_LEVEL_1,
-	THROTTLE_LEVEL_2,
-	THROTTLE_LEVEL_3,
-	/* Invalid */
-	THROTTLE_LEVEL_MAX,
-};
 enum throttle_phase {
 	THROTTLE_PHASE_OFF,
 	THROTTLE_PHASE_ON,
@@ -954,11 +931,6 @@ struct ol_txrx_pdev_t {
 	struct ol_txrx_peer_t *self_peer;
 };
-struct ol_txrx_ocb_chan_info {
-	uint32_t chan_freq;
-	uint16_t disable_rx_stats_hdr:1;
-};
 struct ol_txrx_vdev_t {
 	struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
 					the parent of this virtual device */
@@ -1222,53 +1194,6 @@ struct ol_txrx_peer_t {
 	qdf_time_t last_deauth_rcvd;
 };
-enum ol_rx_err_type {
-	OL_RX_ERR_DEFRAG_MIC,
-	OL_RX_ERR_PN,
-	OL_RX_ERR_UNKNOWN_PEER,
-	OL_RX_ERR_MALFORMED,
-	OL_RX_ERR_TKIP_MIC,
-	OL_RX_ERR_DECRYPT,
-	OL_RX_ERR_MPDU_LENGTH,
-	OL_RX_ERR_ENCRYPT_REQUIRED,
-	OL_RX_ERR_DUP,
-	OL_RX_ERR_UNKNOWN,
-	OL_RX_ERR_FCS,
-	OL_RX_ERR_PRIVACY,
-	OL_RX_ERR_NONE_FRAG,
-	OL_RX_ERR_NONE = 0xFF
-};
-/**
- * ol_mic_error_info - carries the information associated with
- * a MIC error
- * @vdev_id: virtual device ID
- * @key_id: Key ID
- * @pn: packet number
- * @sa: source address
- * @da: destination address
- * @ta: transmitter address
- */
-struct ol_mic_error_info {
-	uint8_t vdev_id;
-	uint32_t key_id;
-	uint64_t pn;
-	uint8_t sa[OL_TXRX_MAC_ADDR_LEN];
-	uint8_t da[OL_TXRX_MAC_ADDR_LEN];
-	uint8_t ta[OL_TXRX_MAC_ADDR_LEN];
-};
-/**
- * ol_error_info - carries the information associated with an
- * error indicated by the firmware
- * @mic_err: MIC error information
- */
-struct ol_error_info {
-	union {
-		struct ol_mic_error_info mic_err;
-	} u;
-};
 struct ol_rx_remote_data {
 	qdf_nbuf_t msdu;
 	uint8_t mac_id;