qcacmn: add cdp wrapper for mobile device compile

Add CDP wrapper APIs so the mobile-device data path compiles against the common CDP interface.

Change-Id: I05a6c58056f8884915580c074efa81a5a28f71be
CRs-fixed: 1075597
This commit is contained in:
Leo Chang
2016-09-27 17:00:52 -07:00
committed by qcabuildsw
parent 870abdada3
commit db6358c42f
19 changed files with 2666 additions and 462 deletions

View File

@@ -31,7 +31,46 @@
#ifndef _CDP_TXRX_BUS_H_
#define _CDP_TXRX_BUS_H_
QDF_STATUS ol_txrx_bus_suspend(void);
QDF_STATUS ol_txrx_bus_resume(void);
/**
 * cdp_bus_suspend() - suspend bus
 * @soc - data path soc handle
 *
 * Forward a bus suspend request to the registered data path implementation.
 *
 * return QDF_STATUS_E_INVAL on invalid instance,
 *        QDF_STATUS_E_NOSUPPORT when no suspend op is registered,
 *        otherwise the status reported by the suspend op
 */
static inline QDF_STATUS cdp_bus_suspend(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->bus_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	/* op is optional; absence is reported, not fatal */
	if (!soc->ops->bus_ops->bus_suspend)
		return QDF_STATUS_E_NOSUPPORT;

	return soc->ops->bus_ops->bus_suspend();
}
/**
 * cdp_bus_resume() - resume bus
 * @soc - data path soc handle
 *
 * resume bus
 *
 * return QDF_STATUS_E_INVAL on invalid instance,
 *        QDF_STATUS_E_NOSUPPORT when no resume op is registered,
 *        otherwise the status reported by the registered resume op
 */
static inline QDF_STATUS cdp_bus_resume(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->bus_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}
	/* op is optional; absence is reported as not-supported */
	if (soc->ops->bus_ops->bus_resume)
		return soc->ops->bus_ops->bus_resume();
	return QDF_STATUS_E_NOSUPPORT;
}
#endif /* _CDP_TXRX_BUS_H_ */

View File

@@ -32,58 +32,225 @@
#define _CDP_TXRX_CFG_H_
/**
* struct txrx_pdev_cfg_param_t - configuration information
* passed to the data path
* cdp_cfg_set_rx_fwd_disabled() - enable/disable rx forwarding
* @soc - data path soc handle
* @pdev - data path device instance
* @disable_rx_fwd - enable or disable rx forwarding
*
* enable/disable rx forwarding
*
* return NONE
*/
struct txrx_pdev_cfg_param_t {
uint8_t is_full_reorder_offload;
/* IPA Micro controller data path offload enable flag */
uint8_t is_uc_offload_enabled;
/* IPA Micro controller data path offload TX buffer count */
uint32_t uc_tx_buffer_count;
/* IPA Micro controller data path offload TX buffer size */
uint32_t uc_tx_buffer_size;
/* IPA Micro controller data path offload RX indication ring count */
uint32_t uc_rx_indication_ring_count;
/* IPA Micro controller data path offload TX partition base */
uint32_t uc_tx_partition_base;
/* IP, TCP and UDP checksum offload */
bool ip_tcp_udp_checksum_offload;
/* Rx processing in thread from TXRX */
bool enable_rxthread;
/* CE classification enabled through INI */
bool ce_classify_enabled;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Threshold to stop queue in percentage */
uint32_t tx_flow_stop_queue_th;
/* Start queue offset in percentage */
uint32_t tx_flow_start_queue_offset;
#endif
};
static inline void
cdp_cfg_set_rx_fwd_disabled(ol_txrx_soc_handle soc, void *pdev,
		uint8_t disable_rx_fwd)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->cfg_ops->set_cfg_rx_fwd_disabled)
		soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(pdev,
							   disable_rx_fwd);
}
/**
 * cdp_cfg_set_packet_log_enabled() - enable/disable packet log
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @val - enable or disable packet log
 *
 * packet log enable or disable
 *
 * return NONE
 */
static inline void
cdp_cfg_set_packet_log_enabled(ol_txrx_soc_handle soc,
		void *pdev, uint8_t val)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->cfg_ops->set_cfg_packet_log_enabled)
		soc->ops->cfg_ops->set_cfg_packet_log_enabled(pdev, val);
}
/**
 * cdp_cfg_attach() - attach config module
 * @soc - data path soc handle
 * @osdev - os instance
 * @cfg_param - configuration parameter should be propagated
 *
 * Allocate configuration module instance, and propagate configuration values
 *
 * return soc configuration module instance, NULL on failure or when the
 *        cfg_attach op is not registered
 */
static inline void *
cdp_cfg_attach(ol_txrx_soc_handle soc,
		qdf_device_t osdev, void *cfg_param)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return NULL;
	}

	if (soc->ops->cfg_ops->cfg_attach)
		return soc->ops->cfg_ops->cfg_attach(osdev, cfg_param);

	return NULL;
}
/**
 * cdp_cfg_vdev_rx_set_intrabss_fwd() - enable/disable intra-bss forwarding
 * @soc - data path soc handle
 * @vdev - virtual interface instance
 * @val - enable or disable intra bss forwarding
 *
 * ap isolate, do not forward intra bss traffic
 *
 * return NONE
 */
static inline void
cdp_cfg_vdev_rx_set_intrabss_fwd(ol_txrx_soc_handle soc, void *vdev, bool val)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (!soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd)
		return;

	soc->ops->cfg_ops->vdev_rx_set_intrabss_fwd(vdev, val);
}
/**
 * cdp_cfg_is_rx_fwd_disabled() - get vdev rx forward
 * @soc - data path soc handle
 * @vdev - virtual interface instance
 *
 * Return rx forward feature enable status
 *
 * return 1 enabled
 *        0 disabled
 */
static inline uint8_t
cdp_cfg_is_rx_fwd_disabled(ol_txrx_soc_handle soc, void *vdev)
{
	uint8_t disabled = 0;

	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return disabled;
	}

	if (soc->ops->cfg_ops->is_rx_fwd_disabled)
		disabled = soc->ops->cfg_ops->is_rx_fwd_disabled(vdev);

	return disabled;
}
/**
 * cdp_cfg_tx_set_is_mgmt_over_wmi_enabled() - mgmt tx over wmi enable/disable
 * @soc - data path soc handle
 * @value - feature enable or disable
 *
 * Enable or disable management packet TX over WMI feature
 *
 * return None
 */
static inline void
cdp_cfg_tx_set_is_mgmt_over_wmi_enabled(ol_txrx_soc_handle soc,
		uint8_t value)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (!soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled)
		return;

	soc->ops->cfg_ops->tx_set_is_mgmt_over_wmi_enabled(value);
}
/**
 * cdp_cfg_is_high_latency() - query data path is in high or low latency
 * @soc - data path soc handle
 * @pdev - data path device instance
 *
 * query data path is in high or low latency
 *
 * return 1 high latency data path, usb or sdio
 *        0 low latency data path
 */
static inline int
cdp_cfg_is_high_latency(ol_txrx_soc_handle soc, void *pdev)
{
	int high_latency = 0;

	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return high_latency;
	}

	if (soc->ops->cfg_ops->is_high_latency)
		high_latency = soc->ops->cfg_ops->is_high_latency(pdev);

	return high_latency;
}
/**
 * cdp_cfg_set_flow_control_parameters() - set flow control params
 * @soc - data path soc handle
 * @cfg - dp config module instance
 * @param - parameters should set
 *
 * set flow control params
 *
 * return None
 */
static inline void
cdp_cfg_set_flow_control_parameters(ol_txrx_soc_handle soc,
		void *cfg, void *param)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4,
	 * and the old trailing "return;" was redundant
	 */
	if (soc->ops->cfg_ops->set_flow_control_parameters)
		soc->ops->cfg_ops->set_flow_control_parameters(cfg, param);
}
/**
 * cdp_cfg_set_flow_steering() - Set Rx flow steering config based on CFG ini
 * config.
 * @soc - data path soc handle
 * @pdev - handle to the physical device
 * @val - 0 - disable, 1 - enable
 *
 * Return: None
 */
static inline void cdp_cfg_set_flow_steering(ol_txrx_soc_handle soc,
		void *pdev, uint8_t val)
{
	if (!soc || !soc->ops || !soc->ops->cfg_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->cfg_ops->set_flow_steering)
		soc->ops->cfg_ops->set_flow_steering(pdev, val);
}
#endif /* _CDP_TXRX_CFG_H_ */

View File

@@ -87,6 +87,13 @@ static inline void *cdp_pdev_attach
return NULL;
}
/**
 * cdp_pdev_post_attach() - run post-attach setup on a pdev
 * @soc - data path soc handle
 * @pdev - data path device instance
 *
 * return 0 on success or when the op is not registered; guarded against a
 *        NULL/incomplete soc for consistency with the other cdp wrappers
 */
static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc, void *pdev)
{
	/* NULL-instance guard added: every sibling wrapper validates soc
	 * before dereferencing soc->ops
	 */
	if (!soc || !soc->ops || !soc->ops->cmn_drv_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return 0;
	}

	if (soc->ops->cmn_drv_ops->txrx_pdev_post_attach)
		return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);

	return 0;
}
static inline void
cdp_pdev_detach(ol_txrx_soc_handle soc, void *pdev, int force)
{
@@ -310,11 +317,12 @@ cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, void *vdev)
}
/* Look up a vdev on @pdev by its numeric id; NULL when the op is absent. */
static inline void *
cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, void *pdev,
		uint8_t vdev_id)
{
	if (soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id)
		return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id
			(pdev, vdev_id);

	return NULL;
}

View File

@@ -34,13 +34,36 @@
ol_txrx_soc_handle ol_txrx_soc_attach(struct ol_if_ops *dp_ol_if_ops);
#ifdef QCA_WIFI_QCA8074
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
struct ol_if_ops *ol_ops);
#else
/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @osif_soc: Opaque SOC handle from OSIF/HDD
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload operations from the control path
 *
 * Stub used when QCA_WIFI_QCA8074 is not defined: the lithium data path
 * is unavailable, so attach always fails.
 *
 * Return: DP SOC handle on success, NULL on failure (always NULL here)
 */
static inline void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops)
{
	return NULL;
}
#endif /* QCA_WIFI_QCA8074 */
static inline ol_txrx_soc_handle cdp_soc_attach(u_int16_t devid,
void *hif_handle, void *scn, void *htc_handle, qdf_device_t *qdf_dev,
struct ol_if_ops *dp_ol_if_ops)
void *hif_handle, void *scn, void *htc_handle,
qdf_device_t qdf_dev, struct ol_if_ops *dp_ol_if_ops)
{
switch (devid) {
case LITHIUM_DP: /*FIXME Add lithium devide IDs */
return NULL;
return dp_soc_attach_wifi3(scn, hif_handle, htc_handle,
qdf_dev, dp_ol_if_ops);
break;
default:
return ol_txrx_soc_attach(dp_ol_if_ops);

View File

@@ -24,10 +24,24 @@
#ifndef _CDP_TXRX_CMN_STRUCT_H_
#define _CDP_TXRX_CMN_STRUCT_H_
/**
* For WIN legacy header compilation
* Temporary add dummy definitions
* should be removed properly WIN legacy code handle
*/
#include "htc_api.h"
#include "qdf_types.h"
#include "qdf_nbuf.h"
#ifndef CONFIG_WIN
#include <cdp_txrx_mob_def.h>
#endif /* CONFIG_WIN */
#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
#define OL_TXRX_NUM_LOCAL_PEER_IDS 33 /* default */
#endif
#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
/*
* htt_dbg_stats_type -
@@ -104,6 +118,16 @@ enum htt_cmn_t2h_en_stats_status {
HTT_CMN_T2H_EN_STATS_STATUS_SERIES_DONE = 7,
};
/**
 * enum ol_txrx_peer_state - Peer state information
 */
enum ol_txrx_peer_state {
	OL_TXRX_PEER_STATE_INVALID,	/* not a valid state */
	OL_TXRX_PEER_STATE_DISC,	/* initial state */
	OL_TXRX_PEER_STATE_CONN,	/* authentication in progress */
	OL_TXRX_PEER_STATE_AUTH,	/* authentication successful */
};
typedef struct cdp_soc_t *ol_txrx_soc_handle;
/**
@@ -135,6 +159,7 @@ enum wlan_op_mode {
wlan_op_mode_sta,
wlan_op_mode_monitor,
wlan_op_mode_ocb,
wlan_op_mode_ndi,
};
/**
@@ -310,7 +335,7 @@ struct ol_txrx_stats_req {
/* DP soc struct definition */
struct cdp_soc_t {
struct cdp_ops *ops;
struct ol_if_ops *ol_ops;
struct ol_if_ops *ol_ops;
};

View File

@@ -31,100 +31,190 @@
*/
#ifndef _CDP_TXRX_FC_LEG_H_
#define _CDP_TXRX_FC_LEG_H_
#include <cdp_txrx_mob_def.h>
/**
* enum netif_action_type - Type of actions on netif queues
* @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
* @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
* @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
* @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
* @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
* @WLAN_NETIF_TX_DISABLE: disable tx
* @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
* @WLAN_NETIF_CARRIER_ON: on carrier
* @WLAN_NETIF_CARRIER_OFF: off carrier
* cdp_fc_register() - Register flow control callback function pointer
* @soc - data path soc handle
* @vdev_id - virtual interface id to register flow control
* @flowControl - callback function pointer
* @osif_fc_ctx - client context pointer
*
* Register flow control callback function pointer and client context pointer
*
* return 0 success
*/
/* Actions applied to netif (OS network interface) queues */
enum netif_action_type {
	WLAN_STOP_ALL_NETIF_QUEUE = 1,		/* stop all netif queues */
	WLAN_START_ALL_NETIF_QUEUE,		/* start all netif queues */
	WLAN_WAKE_ALL_NETIF_QUEUE,		/* wake all netif queues */
	WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,	/* stop all queues, carrier off */
	WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,	/* start all queues, carrier on */
	WLAN_NETIF_TX_DISABLE,			/* disable tx */
	WLAN_NETIF_TX_DISABLE_N_CARRIER,	/* disable tx, carrier off */
	WLAN_NETIF_CARRIER_ON,			/* carrier on */
	WLAN_NETIF_CARRIER_OFF,			/* carrier off */
	WLAN_NETIF_ACTION_TYPE_MAX,		/* number of action types */
};
/**
 * enum netif_reason_type - reason for netif queue action
 * @WLAN_CONTROL_PATH: action from control path
 * @WLAN_DATA_FLOW_CONTROL: because of flow control
 * @WLAN_FW_PAUSE: because of firmware pause
 * @WLAN_TX_ABORT: because of tx abort
 * @WLAN_VDEV_STOP: because of vdev stop
 * @WLAN_PEER_UNAUTHORISED: because of peer is unauthorised
 * @WLAN_THERMAL_MITIGATION: because of thermal mitigation
 * @WLAN_REASON_TYPE_MAX: number of reason types
 */
enum netif_reason_type {
	WLAN_CONTROL_PATH = 1,
	WLAN_DATA_FLOW_CONTROL,
	WLAN_FW_PAUSE,
	WLAN_TX_ABORT,
	WLAN_VDEV_STOP,
	WLAN_PEER_UNAUTHORISED,
	WLAN_THERMAL_MITIGATION,
	WLAN_REASON_TYPE_MAX,
};
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
* ol_txrx_tx_flow_control_fp - tx flow control notification
* function from txrx to OS shim
* @osif_dev - the virtual device's OS shim object
* @tx_resume - tx os q should be resumed or not
*/
typedef void (*ol_txrx_tx_flow_control_fp)(void *osif_dev,
bool tx_resume);
int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
ol_txrx_tx_flow_control_fp flowControl,
void *osif_fc_ctx);
int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id);
void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
bool tx_resume);
bool
ol_txrx_get_tx_resource(uint8_t sta_id,
unsigned int low_watermark,
unsigned int high_watermark_offset);
int
ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth);
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev);
#ifdef CONFIG_ICNSS
static inline void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
static inline int
cdp_fc_register(ol_txrx_soc_handle soc, uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx)
{
	int ret = 0;

	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return ret;
	}

	/* hand the callback and its context to the legacy flow-control ops */
	if (soc->ops->l_flowctl_ops->register_tx_flow_control)
		ret = soc->ops->l_flowctl_ops->register_tx_flow_control(
			vdev_id, flowControl, osif_fc_ctx);

	return ret;
}
/**
 * cdp_fc_deregister() - remove flow control instance
 * @soc - data path soc handle
 * @vdev_id - virtual interface id to deregister flow control on
 *
 * remove flow control instance
 *
 * return 0 success
 */
static inline int
cdp_fc_deregister(ol_txrx_soc_handle soc, uint8_t vdev_id)
{
	int ret = 0;

	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return ret;
	}

	if (soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb)
		ret = soc->ops->l_flowctl_ops->deregister_tx_flow_control_cb(
			vdev_id);

	return ret;
}
/**
 * cdp_fc_get_tx_resource() - get data path resource count
 * @soc - data path soc handle
 * @sta_id - local peer id
 * @low_watermark - low resource threshold
 * @high_watermark_offset - high resource threshold
 *
 * get data path resource count
 *
 * return true enough data path resource available
 *        false resource is not available
 */
static inline bool
cdp_fc_get_tx_resource(ol_txrx_soc_handle soc, uint8_t sta_id,
		unsigned int low_watermark, unsigned int high_watermark_offset)
{
	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return false;
	}
	/* op absent: report no resource rather than crash */
	if (soc->ops->l_flowctl_ops->get_tx_resource)
		return soc->ops->l_flowctl_ops->get_tx_resource(sta_id,
			low_watermark, high_watermark_offset);
	return false;
}
/**
 * cdp_fc_ll_set_tx_pause_q_depth() - set pause queue depth
 * @soc - data path soc handle
 * @vdev_id - virtual interface id to set the pause queue depth on
 * @pause_q_depth - pending tx queue depth
 *
 * set pause queue depth
 *
 * return 0 success
 */
static inline int
cdp_fc_ll_set_tx_pause_q_depth(ol_txrx_soc_handle soc,
		uint8_t vdev_id, int pause_q_depth)
{
	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return 0;
	}
	if (soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth)
		return soc->ops->l_flowctl_ops->ll_set_tx_pause_q_depth(vdev_id,
			pause_q_depth);
	return 0;
}
/**
 * cdp_fc_vdev_flush() - flush tx queue
 * @soc - data path soc handle
 * @vdev - virtual interface context pointer
 *
 * flush tx queue
 *
 * return None
 */
static inline void
cdp_fc_vdev_flush(ol_txrx_soc_handle soc, void *vdev)
{
	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->l_flowctl_ops->vdev_flush)
		soc->ops->l_flowctl_ops->vdev_flush(vdev);
}
#else
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason);
#endif
#ifdef CONFIG_ICNSS
static inline void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev,
uint32_t reason)
/**
 * cdp_fc_vdev_pause() - pause tx scheduler on vdev
 * @soc - data path soc handle
 * @vdev - virtual interface context pointer
 * @reason - pause reason
 *
 * pause tx scheduler on vdev
 *
 * return None
 */
static inline void
cdp_fc_vdev_pause(ol_txrx_soc_handle soc, void *vdev,
		uint32_t reason)
{
	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (!soc->ops->l_flowctl_ops->vdev_pause)
		return;

	soc->ops->l_flowctl_ops->vdev_pause(vdev, reason);
}
#else
void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason);
#endif
/**
 * cdp_fc_vdev_unpause() - resume tx scheduler on vdev
 * @soc - data path soc handle
 * @vdev - virtual interface context pointer
 * @reason - pause reason
 *
 * resume tx scheduler on vdev
 *
 * return None
 */
static inline void
cdp_fc_vdev_unpause(ol_txrx_soc_handle soc, void *vdev,
		uint32_t reason)
{
	if (!soc || !soc->ops || !soc->ops->l_flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (!soc->ops->l_flowctl_ops->vdev_unpause)
		return;

	soc->ops->l_flowctl_ops->vdev_unpause(vdev, reason);
}
#endif /* _CDP_TXRX_FC_LEG_H_ */

View File

@@ -31,33 +31,81 @@
*/
#ifndef _CDP_TXRX_FC_V2_H_
#define _CDP_TXRX_FC_V2_H_
#include "cdp_txrx_flow_ctrl_legacy.h"
#include <cdp_txrx_ops.h>
/**
* @typedef ol_tx_pause_callback_fp
* @brief OSIF function registered with the data path
* cdp_register_pause_cb() - Register flow control callback function pointer
* @soc - data path soc handle
* @pause_cb - callback function pointer
*
* Register flow control callback function pointer and client context pointer
*
* return QDF_STATUS_SUCCESS success
*/
typedef void (*ol_tx_pause_callback_fp)(uint8_t vdev_id,
enum netif_action_type action,
enum netif_reason_type reason);
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb);
void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
#else
static inline
QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
/* Register the osif tx-pause callback with the v2 flow-control ops.
 * The stray unconditional "return QDF_STATUS_SUCCESS;" (diff residue) that
 * made the whole body dead code has been removed.
 */
static inline QDF_STATUS
cdp_register_pause_cb(ol_txrx_soc_handle soc,
		ol_tx_pause_callback_fp pause_cb)
{
	if (!soc || !soc->ops || !soc->ops->flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (soc->ops->flowctl_ops->register_pause_cb)
		return soc->ops->flowctl_ops->register_pause_cb(pause_cb);

	return QDF_STATUS_SUCCESS;
}
static inline void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
/**
* cdp_set_desc_global_pool_size() - set global device pool size
* @soc - data path soc handle
* @num_msdu_desc - descriptor pool size
*
* set global device pool size
*
* return none
*/
static inline void
cdp_set_desc_global_pool_size(ol_txrx_soc_handle soc,
uint32_t num_msdu_desc)
{
if (!soc || !soc->ops || !soc->ops->flowctl_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->flowctl_ops->set_desc_global_pool_size)
return soc->ops->flowctl_ops->set_desc_global_pool_size(
num_msdu_desc);
return;
}
#endif
/**
 * cdp_dump_flow_pool_info() - dump flow pool information
 * @soc - data path soc handle
 *
 * dump flow pool information
 *
 * return none
 */
static inline void
cdp_dump_flow_pool_info(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->flowctl_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->flowctl_ops->dump_flow_pool_info)
		soc->ops->flowctl_ops->dump_flow_pool_info();
}
#endif /* _CDP_TXRX_FC_V2_H_ */

View File

@@ -30,100 +30,212 @@
*/
#ifndef _CDP_TXRX_IPA_H_
#define _CDP_TXRX_IPA_H_
#include <cdp_txrx_mob_def.h>
/**
* ol_txrx_ipa_resources - Resources needed for IPA
* cdp_ipa_get_resource() - Get allocated wlan resources for ipa data path
* @soc - data path soc handle
* @pdev - device instance pointer
* @ipa_res - ipa resources pointer
*
* Get allocated wlan resources for ipa data path
*
* return none
*/
struct ol_txrx_ipa_resources {
qdf_dma_addr_t ce_sr_base_paddr;
uint32_t ce_sr_ring_size;
qdf_dma_addr_t ce_reg_paddr;
qdf_dma_addr_t tx_comp_ring_base_paddr;
uint32_t tx_comp_ring_size;
uint32_t tx_num_alloc_buffer;
qdf_dma_addr_t rx_rdy_ring_base_paddr;
uint32_t rx_rdy_ring_size;
qdf_dma_addr_t rx_proc_done_idx_paddr;
void *rx_proc_done_idx_vaddr;
qdf_dma_addr_t rx2_rdy_ring_base_paddr;
uint32_t rx2_rdy_ring_size;
qdf_dma_addr_t rx2_proc_done_idx_paddr;
void *rx2_proc_done_idx_vaddr;
};
#ifdef IPA_OFFLOAD
void
ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
struct ol_txrx_ipa_resources *ipa_res);
void
ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
void
ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev,
bool uc_active, bool is_tx);
void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg);
void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
void *osif_ctxt),
void *osif_dev);
void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev);
qdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev, qdf_nbuf_t skb);
#else
static inline void
ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
cdp_ipa_get_resource(ol_txrx_soc_handle soc, void *pdev,
struct ol_txrx_ipa_resources *ipa_res)
{
if (!soc || !soc->ops || !soc->ops->ipa_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->ipa_ops->ipa_get_resource)
return soc->ops->ipa_ops->ipa_get_resource(pdev, ipa_res);
return;
}
/**
* cdp_ipa_set_doorbell_paddr() - give IPA db paddr to fw
* @soc - data path soc handle
* @pdev - device instance pointer
* @ipa_tx_uc_doorbell_paddr - tx db paddr
* @ipa_rx_uc_doorbell_paddr - rx db paddr
*
* give IPA db paddr to fw
*
* return none
*/
static inline void
ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
cdp_ipa_set_doorbell_paddr(ol_txrx_soc_handle soc, void *pdev,
qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
{
if (!soc || !soc->ops || !soc->ops->ipa_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->ipa_ops->ipa_set_doorbell_paddr)
return soc->ops->ipa_ops->ipa_set_doorbell_paddr(pdev,
ipa_tx_uc_doorbell_paddr, ipa_rx_uc_doorbell_paddr);
return;
}
/**
* cdp_ipa_set_active() - activate/de-ctivate wlan fw ipa data path
* @soc - data path soc handle
* @pdev - device instance pointer
* @uc_active - activate or de-activate
* @is_tx - toggle tx or rx data path
*
* activate/de-ctivate wlan fw ipa data path
*
* return none
*/
static inline void
ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev,
bool uc_active, bool is_tx)
cdp_ipa_set_active(ol_txrx_soc_handle soc, void *pdev,
bool uc_active, bool is_tx)
{
if (!soc || !soc->ops || !soc->ops->ipa_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->ipa_ops->ipa_set_active)
return soc->ops->ipa_ops->ipa_set_active(pdev, uc_active,
is_tx);
return;
}
/**
* cdp_ipa_op_response() - event handler from fw
* @soc - data path soc handle
* @pdev - device instance pointer
* @op_msg - event contents from firmware
*
* event handler from fw
*
* return none
*/
static inline void
ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
cdp_ipa_op_response(ol_txrx_soc_handle soc, void *pdev,
uint8_t *op_msg)
{
if (!soc || !soc->ops || !soc->ops->ipa_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->ipa_ops->ipa_op_response)
return soc->ops->ipa_ops->ipa_op_response(pdev, op_msg);
return;
}
/**
* cdp_ipa_register_op_cb() - register event handler function pointer
* @soc - data path soc handle
* @pdev - device instance pointer
* @op_cb - event handler callback function pointer
* @osif_dev - osif instance pointer
*
* register event handler function pointer
*
* return none
*/
static inline void
ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
void *osif_ctxt),
void *osif_dev)
cdp_ipa_register_op_cb(ol_txrx_soc_handle soc, void *pdev,
ipa_op_cb_type op_cb, void *osif_dev)
{
if (!soc || !soc->ops || !soc->ops->ipa_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->ipa_ops->ipa_register_op_cb)
return soc->ops->ipa_ops->ipa_register_op_cb(pdev, op_cb,
osif_dev);
return;
}
static inline void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
/**
* cdp_ipa_get_stat() - get ipa data path stats from fw
* @soc - data path soc handle
* @pdev - device instance pointer
*
* get ipa data path stats from fw async
*
* return none
*/
static inline void
cdp_ipa_get_stat(ol_txrx_soc_handle soc, void *pdev)
{
if (!soc || !soc->ops || !soc->ops->ipa_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->ipa_ops->ipa_get_stat)
return soc->ops->ipa_ops->ipa_get_stat(pdev);
return;
}
#endif /* IPA_OFFLOAD */
/**
 * cdp_ipa_tx_send_data_frame() - send IPA data frame
 * @soc: data path soc handle
 * @vdev: vdev
 * @skb: skb
 *
 * Return: NULL on success, skb on failure — presumed from the sibling
 * tx_non_std contract; confirm against the ipa_tx_data_frame implementation.
 * On an invalid soc instance the untransmitted skb is returned to the caller.
 */
static inline qdf_nbuf_t cdp_ipa_tx_send_data_frame(ol_txrx_soc_handle soc,
	void *vdev, qdf_nbuf_t skb)
{
	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return skb;
	}
	if (soc->ops->ipa_ops->ipa_tx_data_frame)
		return soc->ops->ipa_ops->ipa_tx_data_frame(vdev, skb);
	return skb;
}
/**
 * cdp_ipa_set_uc_tx_partition_base() - set tx packet partition base
 * @soc: data path soc handle
 * @pdev: physical device instance
 * @value: partition base value
 *
 * Return: none
 */
static inline void cdp_ipa_set_uc_tx_partition_base(ol_txrx_soc_handle soc,
		void *pdev, uint32_t value)
{
	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->ipa_ops->ipa_set_uc_tx_partition_base)
		soc->ops->ipa_ops->ipa_set_uc_tx_partition_base(pdev, value);
}
#endif /* _CDP_TXRX_IPA_H_ */

View File

@@ -31,9 +31,52 @@
*/
#ifndef _CDP_TXRX_LRO_H_
#define _CDP_TXRX_LRO_H_
/**
 * cdp_register_lro_flush_cb() - register lro flush cb function pointer
 * @soc - data path soc handle
 * @lro_flush_cb - lro flush callback
 * @lro_init_cb - lro init callback
 *
 * register lro flush callback function pointer
 *
 * return none
 */
static inline void cdp_register_lro_flush_cb(ol_txrx_soc_handle soc,
		void (lro_flush_cb)(void *), void *(lro_init_cb)(void))
{
	if (!soc || !soc->ops || !soc->ops->lro_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	/* plain call: returning a void expression violates C11 6.8.6.4 */
	if (soc->ops->lro_ops->register_lro_flush_cb)
		soc->ops->lro_ops->register_lro_flush_cb(lro_flush_cb,
							 lro_init_cb);
}
/**
 * cdp_deregister_lro_flush_cb() - deregister lro flush cb function pointer
 * @soc - data path soc handle
 * @lro_deinit_cb - lro deinit callback
 *
 * deregister lro flush callback function pointer
 *
 * return none
 */
static inline void cdp_deregister_lro_flush_cb(ol_txrx_soc_handle soc,
		void (lro_deinit_cb)(void *))
{
	/* fixed: missing ',' between the soc and callback parameters */
	if (!soc || !soc->ops || !soc->ops->lro_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}

	if (soc->ops->lro_ops->deregister_lro_flush_cb)
		soc->ops->lro_ops->deregister_lro_flush_cb(lro_deinit_cb);
}
#endif /* _CDP_TXRX_LRO_H_ */

View File

@@ -33,37 +33,9 @@
#define _CDP_TXRX_MISC_H_
/**
* @enum ol_tx_spec
* @brief indicate what non-standard transmission actions to apply
* @details
* Indicate one or more of the following:
* - The tx frame already has a complete 802.11 header.
* Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
* A-MSDU aggregation.
* - The tx frame should not be aggregated (A-MPDU or A-MSDU)
* - The tx frame is already encrypted - don't attempt encryption.
* - The tx frame is a segment of a TCP jumbo frame.
* - This tx frame should not be unmapped and freed by the txrx layer
* after transmission, but instead given to a registered tx completion
* callback.
* More than one of these specification can apply, though typically
* only a single specification is applied to a tx frame.
* A compound specification can be created, as a bit-OR of these
* specifications.
*/
enum ol_tx_spec {
OL_TX_SPEC_STD = 0x0, /* do regular processing */
OL_TX_SPEC_RAW = 0x1, /* skip encap + A-MSDU aggr */
OL_TX_SPEC_NO_AGGR = 0x2, /* skip encap + all aggr */
OL_TX_SPEC_NO_ENCRYPT = 0x4, /* skip encap + encrypt */
OL_TX_SPEC_TSO = 0x8, /* TCP segmented */
OL_TX_SPEC_NWIFI_NO_ENCRYPT = 0x10, /* skip encrypt for nwifi */
OL_TX_SPEC_NO_FREE = 0x20, /* give to cb rather than free */
};
/**
* ol_tx_non_std() - Allow the control-path SW to send data frames
* cdp_tx_non_std() - Allow the control-path SW to send data frames
*
* @soc - data path soc handle
* @data_vdev - which vdev should transmit the tx data frames
* @tx_spec - what non-standard handling to apply to the tx data frames
* @msdu_list - NULL-terminated list of tx MSDUs
@@ -83,32 +55,408 @@ enum ol_tx_spec {
*
* Return: null - success, skb - failure
*/
qdf_nbuf_t ol_tx_non_std(ol_txrx_vdev_handle vdev,
enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
static inline qdf_nbuf_t
cdp_tx_non_std(ol_txrx_soc_handle soc, void *vdev,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	qdf_nbuf_t remainder = NULL;

	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return remainder;
	}

	if (soc->ops->misc_ops->tx_non_std)
		remainder = soc->ops->misc_ops->tx_non_std(vdev, tx_spec,
							   msdu_list);

	return remainder;
}
/**
 * cdp_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart beat timer
 * @soc - data path soc handle
 * @vdev - vdev handle
 * @timer_value_sec - new heart beat timer value
 *
 * Return: Old timer value set in vdev.
 */
static inline uint16_t
cdp_set_ibss_vdev_heart_beat_timer(ol_txrx_soc_handle soc, void *vdev,
		uint16_t timer_value_sec)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return 0;
	}

	if (soc->ops->misc_ops->set_ibss_vdev_heart_beat_timer)
		return soc->ops->misc_ops->set_ibss_vdev_heart_beat_timer(
			vdev, timer_value_sec);

	return 0;
}
/**
 * cdp_set_wisa_mode() - enable or disable WISA mode on a vdev
 * @soc - data path soc handle
 * @vdev - vdev handle
 * @enable - enable or disable
 *
 * Return: QDF_STATUS_SUCCESS when the mode was applied (or the op is
 *	not implemented), QDF_STATUS_E_INVAL on a bad soc handle
 */
static inline QDF_STATUS
cdp_set_wisa_mode(ol_txrx_soc_handle soc, void *vdev, bool enable)
{
	struct cdp_misc_ops *misc;

	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	misc = soc->ops->misc_ops;
	if (!misc->set_wisa_mode)
		return QDF_STATUS_SUCCESS;

	return misc->set_wisa_mode(vdev, enable);
}
/**
 * cdp_set_wmm_param() - set wmm parameter
 * @soc - data path soc handle
 * @pdev - device instance pointer
 * @wmm_param - wmm parameter (per-AC scheduler settings)
 *
 * Push updated WMM parameters to the data path; silently ignored
 * when the op is not implemented.
 *
 * Return: none
 */
static inline void
cdp_set_wmm_param(ol_txrx_soc_handle soc, void *pdev,
		struct ol_tx_wmm_param_t wmm_param)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* 'return <void expr>;' is a C constraint violation (C11 6.8.6.4);
	 * call the op plainly instead */
	if (soc->ops->misc_ops->set_wmm_param)
		soc->ops->misc_ops->set_wmm_param(pdev, wmm_param);
}
/**
 * cdp_runtime_suspend() - runtime suspend the data path
 * @soc - data path soc handle
 * @pdev - device instance pointer
 *
 * Return: QDF_STATUS_SUCCESS when suspend succeeds or the op is not
 *	implemented, QDF_STATUS_E_INVAL on a bad soc handle
 */
static inline QDF_STATUS cdp_runtime_suspend(ol_txrx_soc_handle soc,
		void *pdev)
{
	struct cdp_misc_ops *misc;

	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	misc = soc->ops->misc_ops;
	if (!misc->runtime_suspend)
		return QDF_STATUS_SUCCESS;

	return misc->runtime_suspend(pdev);
}
/**
 * cdp_runtime_resume() - runtime resume the data path
 * @soc - data path soc handle
 * @pdev - device instance pointer
 *
 * Return: QDF_STATUS_SUCCESS when resume succeeds or the op is not
 *	implemented, QDF_STATUS_E_INVAL on a bad soc handle
 */
static inline QDF_STATUS cdp_runtime_resume(ol_txrx_soc_handle soc,
		void *pdev)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}
	if (soc->ops->misc_ops->runtime_resume)
		return soc->ops->misc_ops->runtime_resume(pdev);
	return QDF_STATUS_SUCCESS;
}
/**
 * cdp_hl_tdls_flag_reset() - tdls flag reset
 * @soc - data path soc handle
 * @vdev - virtual interface handle pointer
 * @flag - new TDLS flag value to set on the vdev
 *
 * Return: none
 */
static inline void
cdp_hl_tdls_flag_reset(ol_txrx_soc_handle soc, void *vdev, bool flag)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->hl_tdls_flag_reset)
		soc->ops->misc_ops->hl_tdls_flag_reset(vdev, flag);
}
/**
 * cdp_get_opmode() - get vdev operation mode
 * @soc - data path soc handle
 * @vdev - virtual interface instance
 *
 * Return: operational mode of the vdev (e.g. op_mode_ap, op_mode_ibss,
 *	op_mode_sta, op_mode_monitor, op_mode_ocb); 0 when the soc
 *	handle is invalid or the op is not implemented
 */
static inline int
cdp_get_opmode(ol_txrx_soc_handle soc, void *vdev)
{
	struct cdp_misc_ops *misc;

	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return 0;
	}

	misc = soc->ops->misc_ops;
	return misc->get_opmode ? misc->get_opmode(vdev) : 0;
}
/**
 * cdp_get_vdev_id() - get vdev id
 * @soc - data path soc handle
 * @vdev - virtual interface instance
 *
 * Return: interface id of the vdev; 0 when the soc handle is invalid
 *	or the op is not implemented (unknown interface)
 */
static inline uint16_t
cdp_get_vdev_id(ol_txrx_soc_handle soc, void *vdev)
{
	struct cdp_misc_ops *misc;

	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return 0;
	}

	misc = soc->ops->misc_ops;
	return misc->get_vdev_id ? misc->get_vdev_id(vdev) : 0;
}
/**
 * cdp_bad_peer_txctl_set_setting() - configure bad-peer tx flow control
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @enable - enable or disable bad-peer tx flow control
 * @period - detection period (units defined by the implementation)
 * @txq_limit - tx queue limit applied to flagged peers
 *
 * Return: none
 */
static inline void
cdp_bad_peer_txctl_set_setting(ol_txrx_soc_handle soc, void *pdev,
		int enable, int period, int txq_limit)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->bad_peer_txctl_set_setting)
		soc->ops->misc_ops->bad_peer_txctl_set_setting(pdev,
			enable, period, txq_limit);
}
/**
 * cdp_bad_peer_txctl_update_threshold() - update bad-peer tx thresholds
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @level - throttle level to configure
 * @tput_thresh - throughput threshold for the level
 * @tx_limit - tx limit applied at the level
 *
 * Return: none
 */
static inline void
cdp_bad_peer_txctl_update_threshold(ol_txrx_soc_handle soc, void *pdev,
		int level, int tput_thresh, int tx_limit)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->bad_peer_txctl_update_threshold)
		soc->ops->misc_ops->bad_peer_txctl_update_threshold(
			pdev, level, tput_thresh, tx_limit);
}
/**
 * cdp_mark_first_wakeup_packet() - set flag to indicate that
 * fw is compatible for marking first packet after wow wakeup
 * @soc - data path soc handle
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static inline void cdp_mark_first_wakeup_packet(ol_txrx_soc_handle soc,
		uint8_t value)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->mark_first_wakeup_packet)
		soc->ops->misc_ops->mark_first_wakeup_packet(value);
}
/**
 * cdp_update_mac_id() - update mac_id for vdev
 * @psoc - opaque data path soc handle (an ol_txrx_soc_handle; kept as
 *	void * for existing callers)
 * @vdev_id: vdev id
 * @mac_id: mac id
 *
 * Return: none
 */
static inline void cdp_update_mac_id(void *psoc, uint8_t vdev_id,
		uint8_t mac_id)
{
	ol_txrx_soc_handle soc = psoc;

	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->update_mac_id)
		soc->ops->misc_ops->update_mac_id(vdev_id, mac_id);
}
/**
 * cdp_flush_rx_frames() - flush cached rx frames
 * @soc - data path soc handle
 * @peer: peer whose cached rx frames should be flushed
 * @drop: set flag to drop frames instead of delivering them
 *
 * Return: None
 */
static inline void cdp_flush_rx_frames(ol_txrx_soc_handle soc, void *peer,
		bool drop)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->flush_rx_frames)
		soc->ops->misc_ops->flush_rx_frames(peer, drop);
}
/**
 * cdp_get_intra_bss_fwd_pkts_count() - to get the total tx and rx packets
 * that has been forwarded from txrx layer without going to upper layers.
 * @soc - data path soc handle
 * @vdev_id: vdev id
 * @fwd_tx_packets: pointer to forwarded tx packets count parameter
 * @fwd_rx_packets: pointer to forwarded rx packets count parameter
 *
 * Return: status -> A_OK - success, A_ERROR - failure
 * NOTE(review): returns 0 when the soc handle is invalid or the op is
 * not implemented; confirm 0 is the intended failure A_STATUS here
 * (the doc above says A_ERROR on failure).
 */
static inline A_STATUS cdp_get_intra_bss_fwd_pkts_count(
	ol_txrx_soc_handle soc, uint8_t vdev_id,
	uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return 0;
	}
	if (soc->ops->misc_ops->get_intra_bss_fwd_pkts_count)
		return soc->ops->misc_ops->get_intra_bss_fwd_pkts_count(
			vdev_id, fwd_tx_packets, fwd_rx_packets);
	return 0;
}
/**
 * cdp_pkt_log_init() - API to initialize packet log
 * @soc - data path soc handle
 * @pdev: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
static inline void cdp_pkt_log_init(ol_txrx_soc_handle soc, void *pdev,
		void *scn)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->pkt_log_init)
		soc->ops->misc_ops->pkt_log_init(pdev, scn);
}
/**
 * cdp_pkt_log_con_service() - API to connect packet log service
 * @soc - data path soc handle
 * @pdev: pdev handle
 * @scn: HIF context
 *
 * Return: void
 */
static inline void cdp_pkt_log_con_service(ol_txrx_soc_handle soc,
		void *pdev, void *scn)
{
	if (!soc || !soc->ops || !soc->ops->misc_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->misc_ops->pkt_log_con_service)
		soc->ops->misc_ops->pkt_log_con_service(pdev, scn);
}
#endif /* _CDP_TXRX_MISC_H_ */

394
dp/inc/cdp_txrx_mob_def.h Normal file
View File

@@ -0,0 +1,394 @@
/*
* Copyright (c) 2016 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __CDP_TXRX_MOB_DEF_H
#define __CDP_TXRX_MOB_DEF_H
#include <sir_types.h>
#include <htt.h>
#define OL_TXQ_PAUSE_REASON_FW (1 << 0)
#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
#define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2)
#define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3)
#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
/* command options for dumpStats*/
#define WLAN_HDD_STATS 0
#define WLAN_TXRX_STATS 1
#define WLAN_TXRX_HIST_STATS 2
#define WLAN_TXRX_TSO_STATS 3
#define WLAN_HDD_NETIF_OPER_HISTORY 4
#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
#define WLAN_TXRX_DESC_STATS 6
#define WLAN_HIF_STATS 7
#define WLAN_LRO_STATS 8
#define WLAN_SCHEDULER_STATS 21
#define WLAN_TX_QUEUE_STATS 22
#define WLAN_BUNDLE_STATS 23
#define WLAN_CREDIT_STATS 24
#define OL_TXRX_INVALID_NUM_PEERS (-1)
#define OL_TXRX_MAC_ADDR_LEN 6
/* Maximum number of station supported by data path, including BC. */
#define WLAN_MAX_STA_COUNT (HAL_NUM_STA)
/* The symbolic station ID return to HDD to specify the packet is bc/mc */
#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
/* The symbolic station ID return to HDD to specify the packet is
to soft-AP itself */
#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
/* is 802.11 address multicast/broadcast? */
#define IEEE80211_IS_MULTICAST(_a) (*(_a) & 0x01)
#define MAX_PEERS 32
/*
* Bins used for reporting delay histogram:
* bin 0: 0 - 10 ms delay
* bin 1: 10 - 20 ms delay
* bin 2: 20 - 40 ms delay
* bin 3: 40 - 80 ms delay
* bin 4: 80 - 160 ms delay
* bin 5: > 160 ms delay
*/
#define QCA_TX_DELAY_HIST_REPORT_BINS 6
/* BA actions */
#define IEEE80211_ACTION_BA_ADDBA_REQUEST 0 /* ADDBA request */
#define IEEE80211_ACTION_BA_ADDBA_RESPONSE 1 /* ADDBA response */
#define IEEE80211_ACTION_BA_DELBA 2 /* DELBA */
#define IEEE80211_BA_POLICY_DELAYED 0
#define IEEE80211_BA_POLICY_IMMEDIATE 1
#define IEEE80211_BA_AMSDU_SUPPORTED 1
/**
* enum netif_action_type - Type of actions on netif queues
* @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
* @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
* @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
* @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
* @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
* @WLAN_NETIF_TX_DISABLE: disable tx
* @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
* @WLAN_NETIF_CARRIER_ON: on carrier
* @WLAN_NETIF_CARRIER_OFF: off carrier
*/
enum netif_action_type {
WLAN_STOP_ALL_NETIF_QUEUE = 1,
WLAN_START_ALL_NETIF_QUEUE,
WLAN_WAKE_ALL_NETIF_QUEUE,
WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,
WLAN_NETIF_TX_DISABLE,
WLAN_NETIF_TX_DISABLE_N_CARRIER,
WLAN_NETIF_CARRIER_ON,
WLAN_NETIF_CARRIER_OFF,
WLAN_NETIF_ACTION_TYPE_MAX,
};
/**
* enum netif_reason_type - reason for netif queue action
* @WLAN_CONTROL_PATH: action from control path
* @WLAN_DATA_FLOW_CONTROL: because of flow control
* @WLAN_FW_PAUSE: because of firmware pause
* @WLAN_TX_ABORT: because of tx abort
* @WLAN_VDEV_STOP: because of vdev stop
* @WLAN_PEER_UNAUTHORISED: because of peer is unauthorised
* @WLAN_THERMAL_MITIGATION: because of thermal mitigation
*/
enum netif_reason_type {
WLAN_CONTROL_PATH = 1,
WLAN_DATA_FLOW_CONTROL,
WLAN_FW_PAUSE,
WLAN_TX_ABORT,
WLAN_VDEV_STOP,
WLAN_PEER_UNAUTHORISED,
WLAN_THERMAL_MITIGATION,
WLAN_REASON_TYPE_MAX,
};
enum ol_rx_err_type {
OL_RX_ERR_DEFRAG_MIC,
OL_RX_ERR_PN,
OL_RX_ERR_UNKNOWN_PEER,
OL_RX_ERR_MALFORMED,
OL_RX_ERR_TKIP_MIC,
OL_RX_ERR_DECRYPT,
OL_RX_ERR_MPDU_LENGTH,
OL_RX_ERR_ENCRYPT_REQUIRED,
OL_RX_ERR_DUP,
OL_RX_ERR_UNKNOWN,
OL_RX_ERR_FCS,
OL_RX_ERR_PRIVACY,
OL_RX_ERR_NONE_FRAG,
OL_RX_ERR_NONE = 0xFF
};
enum throttle_level {
THROTTLE_LEVEL_0,
THROTTLE_LEVEL_1,
THROTTLE_LEVEL_2,
THROTTLE_LEVEL_3,
/* Invalid */
THROTTLE_LEVEL_MAX,
};
enum {
OL_TX_WMM_AC_BE,
OL_TX_WMM_AC_BK,
OL_TX_WMM_AC_VI,
OL_TX_WMM_AC_VO,
OL_TX_NUM_WMM_AC
};
/**
* @enum ol_tx_spec
* @brief indicate what non-standard transmission actions to apply
* @details
* Indicate one or more of the following:
* - The tx frame already has a complete 802.11 header.
* Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
* A-MSDU aggregation.
* - The tx frame should not be aggregated (A-MPDU or A-MSDU)
* - The tx frame is already encrypted - don't attempt encryption.
* - The tx frame is a segment of a TCP jumbo frame.
* - This tx frame should not be unmapped and freed by the txrx layer
* after transmission, but instead given to a registered tx completion
* callback.
* More than one of these specification can apply, though typically
* only a single specification is applied to a tx frame.
* A compound specification can be created, as a bit-OR of these
* specifications.
*/
enum ol_tx_spec {
OL_TX_SPEC_STD = 0x0, /* do regular processing */
OL_TX_SPEC_RAW = 0x1, /* skip encap + A-MSDU aggr */
OL_TX_SPEC_NO_AGGR = 0x2, /* skip encap + all aggr */
OL_TX_SPEC_NO_ENCRYPT = 0x4, /* skip encap + encrypt */
OL_TX_SPEC_TSO = 0x8, /* TCP segmented */
OL_TX_SPEC_NWIFI_NO_ENCRYPT = 0x10, /* skip encrypt for nwifi */
OL_TX_SPEC_NO_FREE = 0x20, /* give to cb rather than free */
};
/**
* struct ol_txrx_desc_type - txrx descriptor type
* @sta_id: sta id
* @is_qos_enabled: is station qos enabled
* @is_wapi_supported: is station wapi supported
*/
struct ol_txrx_desc_type {
uint8_t sta_id;
uint8_t is_qos_enabled;
uint8_t is_wapi_supported;
};
/**
* struct txrx_pdev_cfg_param_t - configuration information
* passed to the data path
*/
struct txrx_pdev_cfg_param_t {
uint8_t is_full_reorder_offload;
/* IPA Micro controller data path offload enable flag */
uint8_t is_uc_offload_enabled;
/* IPA Micro controller data path offload TX buffer count */
uint32_t uc_tx_buffer_count;
/* IPA Micro controller data path offload TX buffer size */
uint32_t uc_tx_buffer_size;
/* IPA Micro controller data path offload RX indication ring count */
uint32_t uc_rx_indication_ring_count;
/* IPA Micro controller data path offload TX partition base */
uint32_t uc_tx_partition_base;
/* IP, TCP and UDP checksum offload */
bool ip_tcp_udp_checksum_offload;
/* Rx processing in thread from TXRX */
bool enable_rxthread;
/* CE classification enabled through INI */
bool ce_classify_enabled;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Threshold to stop queue in percentage */
uint32_t tx_flow_stop_queue_th;
/* Start queue offset in percentage */
uint32_t tx_flow_start_queue_offset;
#endif
};
/**
 * struct ol_txrx_ipa_resources - Resources needed for IPA
 * @ce_sr_base_paddr: CE source ring base physical address
 * @ce_sr_ring_size: CE source ring size
 * @ce_reg_paddr: CE register physical address
 * @tx_comp_ring_base_paddr: tx completion ring base physical address
 * @tx_comp_ring_size: tx completion ring size
 * @tx_num_alloc_buffer: number of allocated tx buffers
 * @rx_rdy_ring_base_paddr: rx ready ring base physical address
 * @rx_rdy_ring_size: rx ready ring size
 * @rx_proc_done_idx_paddr: rx processing-done index physical address
 * @rx_proc_done_idx_vaddr: rx processing-done index virtual address
 * @rx2_rdy_ring_base_paddr: second rx ready ring base physical address
 * @rx2_rdy_ring_size: second rx ready ring size
 * @rx2_proc_done_idx_paddr: second rx processing-done index phys address
 * @rx2_proc_done_idx_vaddr: second rx processing-done index virt address
 */
struct ol_txrx_ipa_resources {
	qdf_dma_addr_t ce_sr_base_paddr;
	uint32_t ce_sr_ring_size;
	qdf_dma_addr_t ce_reg_paddr;
	qdf_dma_addr_t tx_comp_ring_base_paddr;
	uint32_t tx_comp_ring_size;
	uint32_t tx_num_alloc_buffer;
	qdf_dma_addr_t rx_rdy_ring_base_paddr;
	uint32_t rx_rdy_ring_size;
	qdf_dma_addr_t rx_proc_done_idx_paddr;
	void *rx_proc_done_idx_vaddr;
	qdf_dma_addr_t rx2_rdy_ring_base_paddr;
	uint32_t rx2_rdy_ring_size;
	qdf_dma_addr_t rx2_proc_done_idx_paddr;
	void *rx2_proc_done_idx_vaddr;
};
struct ol_txrx_ocb_chan_info {
uint32_t chan_freq;
uint16_t disable_rx_stats_hdr:1;
};
/**
* ol_mic_error_info - carries the information associated with
* a MIC error
* @vdev_id: virtual device ID
* @key_id: Key ID
* @pn: packet number
* @sa: source address
* @da: destination address
* @ta: transmitter address
*/
struct ol_mic_error_info {
uint8_t vdev_id;
uint32_t key_id;
uint64_t pn;
uint8_t sa[OL_TXRX_MAC_ADDR_LEN];
uint8_t da[OL_TXRX_MAC_ADDR_LEN];
uint8_t ta[OL_TXRX_MAC_ADDR_LEN];
};
/**
* ol_error_info - carries the information associated with an
* error indicated by the firmware
* @mic_err: MIC error information
*/
struct ol_error_info {
union {
struct ol_mic_error_info mic_err;
} u;
};
/**
* struct ol_txrx_ocb_set_chan - txrx OCB channel info
* @ocb_channel_count: Channel count
* @ocb_channel_info: OCB channel info
*/
struct ol_txrx_ocb_set_chan {
uint32_t ocb_channel_count;
struct ol_txrx_ocb_chan_info *ocb_channel_info;
};
/**
* @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param
* @details
* The struct is used to specify informaiton to update TX WMM scheduler.
*/
struct ol_tx_ac_param_t {
uint32_t aifs;
uint32_t cwmin;
uint32_t cwmax;
};
struct ol_tx_wmm_param_t {
struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
};
struct ieee80211_ba_parameterset {
#if _BYTE_ORDER == _BIG_ENDIAN
uint16_t buffersize:10, /* B6-15 buffer size */
tid:4, /* B2-5 TID */
bapolicy:1, /* B1 block ack policy */
amsdusupported:1; /* B0 amsdu supported */
#else
uint16_t amsdusupported:1, /* B0 amsdu supported */
bapolicy:1, /* B1 block ack policy */
tid:4, /* B2-5 TID */
buffersize:10; /* B6-15 buffer size */
#endif
} __packed;
struct ieee80211_ba_seqctrl {
#if _BYTE_ORDER == _BIG_ENDIAN
uint16_t startseqnum:12, /* B4-15 starting sequence number */
fragnum:4; /* B0-3 fragment number */
#else
uint16_t fragnum:4, /* B0-3 fragment number */
startseqnum:12; /* B4-15 starting sequence number */
#endif
} __packed;
struct ieee80211_delba_parameterset {
#if _BYTE_ORDER == _BIG_ENDIAN
uint16_t tid:4, /* B12-15 tid */
initiator:1, /* B11 initiator */
reserved0:11; /* B0-10 reserved */
#else
uint16_t reserved0:11, /* B0-10 reserved */
initiator:1, /* B11 initiator */
tid:4; /* B12-15 tid */
#endif
} __packed;
typedef QDF_STATUS(*rx_callback_fp)(void *p_cds_gctx,
qdf_nbuf_t pDataBuff,
uint8_t ucSTAId);
/**
* ol_txrx_vdev_peer_remove_cb - wma_remove_peer callback
*/
typedef void (*ol_txrx_vdev_peer_remove_cb)(void *handle, uint8_t *bssid,
uint8_t vdev_id, void *peer, bool roam_synch_in_progress);
/**
* ol_txrx_tx_flow_control_fp - tx flow control notification
* function from txrx to OS shim
* @osif_dev - the virtual device's OS shim object
* @tx_resume - tx os q should be resumed or not
*/
typedef void (*ol_txrx_tx_flow_control_fp)(void *osif_dev, bool tx_resume);
/**
* ol_txrx_tx_flow_control_fp - tx flow control notification
* function from txrx to OS shim
* @osif_dev - the virtual device's OS shim object
* @tx_resume - tx os q should be resumed or not
*/
typedef void (*tx_flow_control_fp)(void *osif_dev,
bool tx_resume);
/**
* @typedef ol_tx_pause_callback_fp
* @brief OSIF function registered with the data path
*/
typedef void (*ol_tx_pause_callback_fp)(uint8_t vdev_id,
enum netif_action_type action,
enum netif_reason_type reason);
typedef void (*ipa_op_cb_type)(uint8_t *op_msg,
void *osif_ctxt);
#endif /* __CDP_TXRX_MOB_DEF_H */

View File

@@ -27,32 +27,51 @@
#ifndef _CDP_TXRX_OCB_H_
#define _CDP_TXRX_OCB_H_
#include <cdp_txrx_mob_def.h>
/*
 * struct ol_txrx_ocb_set_chan is defined in cdp_txrx_mob_def.h
 * (included above); it must not be redefined here or every file that
 * includes both headers fails to compile.
 */

/**
 * cdp_set_ocb_chan_info() - set OCB channel info to vdev.
 * @soc - data path soc handle
 * @vdev: vdev handle
 * @ocb_set_chan: OCB channel information to be set in vdev.
 *
 * Return: NONE
 */
static inline void
cdp_set_ocb_chan_info(ol_txrx_soc_handle soc, void *vdev,
		struct ol_txrx_ocb_set_chan ocb_set_chan)
{
	if (!soc || !soc->ops || !soc->ops->ocb_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	/* avoid returning a void expression (invalid C); just call */
	if (soc->ops->ocb_ops->set_ocb_chan_info)
		soc->ops->ocb_ops->set_ocb_chan_info(vdev, ocb_set_chan);
}
/**
 * cdp_get_ocb_chan_info() - return handle to vdev ocb_channel_info
 * @soc - data path soc handle
 * @vdev: vdev handle
 *
 * Return: handle to struct ol_txrx_ocb_chan_info; NULL when the soc
 *	handle is invalid or the op is not implemented
 */
static inline struct ol_txrx_ocb_chan_info *
cdp_get_ocb_chan_info(ol_txrx_soc_handle soc, void *vdev)
{
	if (!soc || !soc->ops || !soc->ops->ocb_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	if (soc->ops->ocb_ops->get_ocb_chan_info)
		return soc->ops->ocb_ops->get_ocb_chan_info(vdev);

	return NULL;
}
#endif /* _CDP_TXRX_OCB_H_ */

View File

@@ -52,6 +52,8 @@ struct cdp_cmn_ops {
(ol_txrx_soc_handle soc, void *ctrl_pdev,
HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
int (*txrx_pdev_post_attach)(void *pdev);
void(*txrx_pdev_detach)(void *pdev, int force);
void *(*txrx_peer_attach)
@@ -108,8 +110,8 @@ struct cdp_cmn_ops {
int(*txrx_aggr_cfg)(void *vdev, int max_subfrms_ampdu,
int max_subfrms_amsdu);
int(*txrx_fw_stats_get)(void *vdev,
struct ol_txrx_stats_req *req, bool per_vdev, bool response_expected);
A_STATUS(*txrx_fw_stats_get)(void *vdev, struct ol_txrx_stats_req *req,
bool per_vdev, bool response_expected);
int(*txrx_debug)(void *vdev, int debug_specs);
@@ -155,7 +157,7 @@ struct cdp_cmn_ops {
(*txrx_get_ctrl_pdev_from_vdev)(void *vdev);
void *
(*txrx_get_vdev_from_vdev_id)(uint8_t vdev_id);
(*txrx_get_vdev_from_vdev_id)(void *pdev, uint8_t vdev_id);
void (*txrx_soc_detach)(void *soc);
@@ -377,7 +379,7 @@ struct cdp_host_stats_ops {
void (*txrx_host_ce_stats)(void *vdev);
int (*txrx_stats_publish)(void *pdev,
struct ol_txrx_stats *buf);
void *buf);
/**
* @brief Enable enhanced stats functionality.
*
@@ -451,15 +453,12 @@ struct cdp_raw_ops {
(*rsim_tx_encap)(void *vdev, qdf_nbuf_t *pnbuf);
};
#ifdef CONFIG_WIN
struct cdp_pflow_ops {
uint32_t(*pflow_update_pdev_params)(void *,
ol_ath_param_t, uint32_t, void *);
};
struct cdp_mob_drv_ops {
/* FIXME to be fixed */
};
#endif /* CONFIG_WIN */
struct ol_if_ops {
void (*peer_set_default_routing)(void *scn_handle,
@@ -474,7 +473,283 @@ struct ol_if_ops {
/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: update IBSS heart-beat timer; returns
 *	the previous timer value
 * @set_wmm_param: push WMM scheduler parameters to a cfg pdev
 * @bad_peer_txctl_set_setting: configure bad-peer tx flow control
 * @bad_peer_txctl_update_threshold: update bad-peer tx thresholds
 * @hl_tdls_flag_reset: reset high-latency TDLS flag on a vdev
 * @tx_non_std: transmit frames with non-standard handling (raw/no-aggr/...)
 * @get_vdev_id: get vdev id for a vdev instance
 * @set_wisa_mode: enable/disable WISA mode on a vdev
 * @runtime_suspend: runtime-suspend the data path
 * @runtime_resume: runtime-resume the data path
 * @get_opmode: get operation mode of a vdev
 * @mark_first_wakeup_packet: flag fw marking of first packet after wow
 * @update_mac_id: update mac_id for a vdev id
 * @flush_rx_frames: flush (deliver or drop) rx frames cached for a peer
 * @get_intra_bss_fwd_pkts_count: tx/rx counts forwarded within txrx layer
 * @pkt_log_init: initialize packet log
 * @pkt_log_con_service: connect packet log service
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(void *vdev,
		uint16_t timer_value_sec);
	void (*set_wmm_param)(void *cfg_pdev,
		struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(void *pdev, int enable,
		int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(void *pdev,
		int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(void *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(void *vdev,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(void *vdev);
	QDF_STATUS (*set_wisa_mode)(void *vdev, bool enable);
	QDF_STATUS (*runtime_suspend)(void *pdev);
	QDF_STATUS (*runtime_resume)(void *pdev);
	int (*get_opmode)(void *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
		uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(void *handle, void *scn);
	void (*pkt_log_con_service)(void *pdev, void *scn);
};
/**
* struct cdp_tx_delay_ops - mcl tx delay ops
* @tx_delay:
* @tx_delay_hist:
* @tx_packet_count:
* @tx_set_compute_interval:
*/
struct cdp_tx_delay_ops {
void (*tx_delay)(void *pdev, uint32_t *queue_delay_microsec,
uint32_t *tx_delay_microsec, int category);
void (*tx_delay_hist)(void *pdev,
uint16_t *bin_values, int category);
void (*tx_packet_count)(void *pdev, uint16_t *out_packet_count,
uint16_t *out_packet_loss_count, int category);
void (*tx_set_compute_interval)(void *pdev, uint32_t interval);
};
/**
* struct cdp_pmf_ops - mcl protected management frame ops
* @get_pn_info:
*/
struct cdp_pmf_ops {
void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
/**
* struct cdp_cfg_ops - mcl configuration ops
* @set_cfg_rx_fwd_disabled:
* @set_cfg_packet_log_enabled:
* @cfg_attach:
* @vdev_rx_set_intrabss_fwd:
* @get_opmode:
* @is_rx_fwd_disabled:
* @tx_set_is_mgmt_over_wmi_enabled:
* @is_high_latency:
* @set_flow_control_parameters:
*/
struct cdp_cfg_ops {
void (*set_cfg_rx_fwd_disabled)(void *pdev, uint8_t disable_rx_fwd);
void (*set_cfg_packet_log_enabled)(void *pdev, uint8_t val);
void * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
void (*vdev_rx_set_intrabss_fwd)(void *vdev, bool val);
uint8_t (*is_rx_fwd_disabled)(void *vdev);
void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
int (*is_high_latency)(void *pdev);
void (*set_flow_control_parameters)(void *cfg, void *param);
void (*set_flow_steering)(void *pdev, uint8_t val);
};
/**
* struct cdp_flowctl_ops - mcl flow control
* @register_pause_cb:
* @set_desc_global_pool_size:
* @dump_flow_pool_info:
*/
struct cdp_flowctl_ops {
QDF_STATUS (*register_pause_cb)(ol_tx_pause_callback_fp);
void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
void (*dump_flow_pool_info)(void);
};
/**
* struct cdp_lflowctl_ops - mcl legacy flow control ops
* @register_tx_flow_control:
* @deregister_tx_flow_control_cb:
* @flow_control_cb:
* @get_tx_resource:
* @ll_set_tx_pause_q_depth:
* @vdev_flush:
* @vdev_pause:
* @vdev_unpause:
*/
struct cdp_lflowctl_ops {
int (*register_tx_flow_control)(uint8_t vdev_id,
ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx);
int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
void (*flow_control_cb)(void *vdev, bool tx_resume);
bool (*get_tx_resource)(uint8_t sta_id,
unsigned int low_watermark,
unsigned int high_watermark_offset);
int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
void (*vdev_flush)(void *vdev);
void (*vdev_pause)(void *vdev, uint32_t reason);
void (*vdev_unpause)(void *vdev, uint32_t reason);
};
/**
* struct cdp_ipa_ops - mcl ipa data path ops
* @ipa_get_resource:
* @ipa_set_doorbell_paddr:
* @ipa_set_active:
* @ipa_op_response:
* @ipa_register_op_cb:
* @ipa_get_stat:
* @ipa_tx_data_frame:
*/
struct cdp_ipa_ops {
void (*ipa_get_resource)(void *pdev,
struct ol_txrx_ipa_resources *ipa_res);
void (*ipa_set_doorbell_paddr)(void *pdev,
qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
void (*ipa_set_active)(void *pdev, bool uc_active, bool is_tx);
void (*ipa_op_response)(void *pdev, uint8_t *op_msg);
void (*ipa_register_op_cb)(void *pdev,
void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
void *osif_dev);
void (*ipa_get_stat)(void *pdev);
qdf_nbuf_t (*ipa_tx_data_frame)(void *vdev, qdf_nbuf_t skb);
void (*ipa_set_uc_tx_partition_base)(void *pdev, uint32_t value);
};
/**
* struct cdp_lro_ops - mcl large receive offload ops
* @register_lro_flush_cb:
* @deregister_lro_flush_cb:
*/
struct cdp_lro_ops {
void (*register_lro_flush_cb)(void (lro_flush_cb)(void *),
void *(lro_init_cb)(void));
void (*deregister_lro_flush_cb)(void (lro_deinit_cb)(void *));
};
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: suspend the bus-facing data path
 * @bus_resume: resume the bus-facing data path
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(void);
	QDF_STATUS (*bus_resume)(void);
};
/**
* struct cdp_ocb_ops - mcl ocb ops
* @set_ocb_chan_info:
* @get_ocb_chan_info:
*/
struct cdp_ocb_ops {
void (*set_ocb_chan_info)(void *vdev,
struct ol_txrx_ocb_set_chan ocb_set_chan);
struct ol_txrx_ocb_chan_info * (*get_ocb_chan_info)(void *vdev);
};
/**
* struct cdp_peer_ops - mcl peer related ops
* @register_peer:
* @clear_peer:
* @cfg_attach:
* @find_peer_by_addr:
* @find_peer_by_addr_and_vdev:
* @local_peer_id:
* @peer_find_by_local_id:
* @peer_state_update:
* @get_vdevid:
* @get_vdev_by_sta_id:
* @register_ocb_peer:
* @peer_get_peer_mac_addr:
* @get_peer_state:
* @get_vdev_for_peer:
* @update_ibss_add_peer_num_of_vdev:
* @remove_peers_for_vdev:
* @remove_peers_for_vdev_no_lock:
* @copy_mac_addr_raw:
* @add_last_real_peer:
* @last_assoc_received:
* @last_disassoc_received:
* @last_deauth_received:
* @is_vdev_restore_last_peer:
* @update_last_real_peer:
*/
struct cdp_peer_ops {
QDF_STATUS (*register_peer)(void *pdev,
struct ol_txrx_desc_type *sta_desc);
QDF_STATUS (*clear_peer)(void *pdev, uint8_t sta_id);
QDF_STATUS (*change_peer_state)(uint8_t sta_id,
enum ol_txrx_peer_state sta_state,
bool roam_synch_in_progress);
void * (*find_peer_by_addr)(void *pdev,
uint8_t *peer_addr, uint8_t *peer_id);
void * (*find_peer_by_addr_and_vdev)(void *pdev, void *vdev,
uint8_t *peer_addr, uint8_t *peer_id);
uint16_t (*local_peer_id)(void *peer);
void * (*peer_find_by_local_id)(void *pdev, uint8_t local_peer_id);
QDF_STATUS (*peer_state_update)(void *pdev, uint8_t *peer_addr,
enum ol_txrx_peer_state state);
QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
void * (*get_vdev_by_sta_id)(uint8_t sta_id);
QDF_STATUS (*register_ocb_peer)(void *cds_ctx, uint8_t *mac_addr,
uint8_t *peer_id);
uint8_t * (*peer_get_peer_mac_addr)(void *peer);
int (*get_peer_state)(void *peer);
void * (*get_vdev_for_peer)(void *peer);
int16_t (*update_ibss_add_peer_num_of_vdev)(void *vdev,
int16_t peer_num_delta);
void (*remove_peers_for_vdev)(void *vdev,
ol_txrx_vdev_peer_remove_cb callback,
void *callback_context, bool remove_last_peer);
void (*remove_peers_for_vdev_no_lock)(void *vdev,
ol_txrx_vdev_peer_remove_cb callback,
void *callback_context);
void (*copy_mac_addr_raw)(void *vdev, uint8_t *bss_addr);
void (*add_last_real_peer)(void *pdev, void *vdev, uint8_t *peer_id);
qdf_time_t * (*last_assoc_received)(void *peer);
qdf_time_t * (*last_disassoc_received)(void *peer);
qdf_time_t * (*last_deauth_received)(void *peer);
bool (*is_vdev_restore_last_peer)(void *peer);
void (*update_last_real_peer)(void *pdev, void *peer,
uint8_t *peer_id, bool restore_last_peer);
void (*peer_detach_force_delete)(void *peer);
};
/**
 * struct cdp_throttle_ops - mcl tx throttle ops
 * @throttle_init_period: initialize throttle period / duty-cycle levels
 * @throttle_set_level: set a specific throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(void *pdev, int period,
			uint8_t *dutycycle_level);
	void (*throttle_set_level)(void *pdev, int level);
};
/**
 * struct cdp_mob_stats_ops - mcl stats ops
 * @display_stats: display the stats selected by bitmap
 * @clear_stats: clear the stats selected by bitmap
 * @stats: write vdev stats into the caller-supplied buffer
 */
struct cdp_mob_stats_ops {
	void (*display_stats)(uint16_t bitmap);
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* CONFIG_WIN */
struct cdp_ops {
struct cdp_cmn_ops *cmn_drv_ops;
@@ -485,7 +760,21 @@ struct cdp_ops {
struct cdp_wds_ops *wds_ops;
struct cdp_raw_ops *raw_ops;
struct cdp_pflow_ops *pflow_ops;
struct cdp_mob_ops *mob_drv_ops;
#ifndef CONFIG_WIN
struct cdp_misc_ops *misc_ops;
struct cdp_cfg_ops *cfg_ops;
struct cdp_flowctl_ops *flowctl_ops;
struct cdp_lflowctl_ops *l_flowctl_ops;
struct cdp_ipa_ops *ipa_ops;
struct cdp_lro_ops *lro_ops;
struct cdp_bus_ops *bus_ops;
struct cdp_ocb_ops *ocb_ops;
struct cdp_peer_ops *peer_ops;
struct cdp_throttle_ops *throttle_ops;
struct cdp_mob_stats_ops *mob_stats_ops;
struct cdp_tx_delay_ops *delay_ops;
struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
#endif

View File

@@ -31,148 +31,616 @@
*/
#ifndef _CDP_TXRX_PEER_H_
#define _CDP_TXRX_PEER_H_
/*
 * rx delivery callback registered by the OS shim: receives one rx nbuf
 * for the station identified by ucSTAId.
 * NOTE(review): p_cds_gctx presumably is the CDS global context -- confirm
 * against the registering caller.
 */
typedef QDF_STATUS(*ol_rx_callback_fp)(void *p_cds_gctx,
			 qdf_nbuf_t pDataBuff,
			 uint8_t ucSTAId);
#include <cdp_txrx_ops.h>
/**
 * enum ol_txrx_peer_state - Peer state information
 */
enum ol_txrx_peer_state {
	OL_TXRX_PEER_STATE_INVALID,
	OL_TXRX_PEER_STATE_DISC,    /* initial state */
	OL_TXRX_PEER_STATE_CONN,    /* authentication in progress */
	OL_TXRX_PEER_STATE_AUTH,    /* authentication successful */
};
/**
 * struct ol_txrx_desc_type - txrx descriptor type
 * @sta_id: local station/peer id
 * @is_qos_enabled: non-zero if the station has QoS enabled
 * @is_wapi_supported: non-zero if the station supports WAPI
 */
struct ol_txrx_desc_type {
	uint8_t sta_id;
	uint8_t is_qos_enabled;
	uint8_t is_wapi_supported;
};
QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc);
/**
 * ol_txrx_vdev_peer_remove_cb - wma_remove_peer callback
 * @handle: opaque callback context supplied by the registrant
 * @bssid: mac address of the peer being removed
 * @vdev_id: virtual interface id the peer belongs to
 * @peer: handle of the peer being removed
 * @roam_synch_in_progress: true while a roam-synch event is being processed
 */
typedef void (*ol_txrx_vdev_peer_remove_cb)(void *handle, uint8_t *bssid,
					uint8_t vdev_id,
					ol_txrx_peer_handle peer,
					bool roam_synch_in_progress);
QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id);
QDF_STATUS ol_txrx_change_peer_state(uint8_t sta_id,
enum ol_txrx_peer_state sta_state,
bool roam_synch_in_progress);
ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
uint8_t *peer_addr,
uint8_t *peer_id);
ol_txrx_peer_handle
ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
ol_txrx_vdev_handle vdev,
uint8_t *peer_addr, uint8_t *peer_id);
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer);
ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
uint8_t *peer_addr,
uint8_t *peer_id);
ol_txrx_peer_handle
ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
ol_txrx_vdev_handle vdev,
uint8_t *peer_addr, uint8_t *peer_id);
ol_txrx_peer_handle
ol_txrx_peer_find_by_local_id(ol_txrx_pdev_handle pdev, uint8_t local_peer_id);
#else
#define ol_txrx_local_peer_id(peer) OL_TXRX_INVALID_LOCAL_PEER_ID
#define ol_txrx_find_peer_by_addr(pdev, peer_addr, peer_id) NULL
#define ol_txrx_find_peer_by_addr_and_vdev(pdev, vdev, peer_addr, peer_id) NULL
#define ol_txrx_peer_find_by_local_id(pdev, local_peer_id) NULL
#endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
QDF_STATUS
ol_txrx_peer_state_update(ol_txrx_pdev_handle pdev, uint8_t *peer_addr,
enum ol_txrx_peer_state state);
QDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id);
void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id);
QDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
uint8_t *peer_id);
/**
* ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
* @peer: handle to peer
* cdp_peer_register() - Register peer into physical device
* @soc - data path soc handle
* @pdev - data path device instance
* @sta_desc - peer description
*
* returns mac addrs for module which do not know peer type
* Register peer into physical device
*
* Return: the mac_addr from peer
* Return: QDF_STATUS_SUCCESS registration success
* QDF_STATUS_E_NOSUPPORT not support this feature
*/
uint8_t *ol_txrx_peer_get_peer_mac_addr(ol_txrx_peer_handle peer);
static inline QDF_STATUS
cdp_peer_register(ol_txrx_soc_handle soc, void *pdev,
	struct ol_txrx_desc_type *sta_desc)
{
	struct cdp_peer_ops *ops;

	/* reject a soc that was never (or only partially) attached */
	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->register_peer)
		return QDF_STATUS_E_NOSUPPORT;

	return ops->register_peer(pdev, sta_desc);
}
/**
* ol_txrx_get_peer_state() - Return peer state of peer
* @peer: peer handle
* cdp_peer_clear() - remove peer from physical device
* @soc - data path soc handle
* @pdev - data path device instance
* @sta_id - local peer id
*
* Return: return peer state
* remove peer from physical device
*
* Return: QDF_STATUS_SUCCESS registration success
* QDF_STATUS_E_NOSUPPORT not support this feature
*/
int ol_txrx_get_peer_state(ol_txrx_peer_handle peer);
/**
 * cdp_peer_clear() - remove peer from physical device
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @sta_id - local peer id of the peer to clear
 *
 * Remove the peer identified by @sta_id from the physical device.
 *
 * Return: QDF_STATUS_SUCCESS clear success
 *         QDF_STATUS_E_NOSUPPORT not support this feature
 */
static inline QDF_STATUS
cdp_peer_clear(ol_txrx_soc_handle soc, void *pdev, uint8_t sta_id)
{
	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	/* Fix: the original dropped sta_id and always returned E_NOSUPPORT;
	 * dispatch to the registered clear_peer op like the sibling wrappers.
	 */
	if (soc->ops->peer_ops->clear_peer)
		return soc->ops->peer_ops->clear_peer(sta_id);

	return QDF_STATUS_E_NOSUPPORT;
}
/**
* ol_txrx_get_vdev_for_peer() - Return vdev from peer handle
* @peer: peer handle
* cdp_peer_register_ocb_peer() - register ocb peer from physical device
* @soc - data path soc handle
* @cds_ctx - cds void context
* @mac_addr - mac address for ocb self peer
* @peer_id - local peer id
*
* Return: vdev handle from peer
* register ocb peer from physical device
*
* Return: QDF_STATUS_SUCCESS registration success
* QDF_STATUS_E_NOSUPPORT not support this feature
*/
ol_txrx_vdev_handle
ol_txrx_get_vdev_for_peer(ol_txrx_peer_handle peer);
static inline QDF_STATUS
cdp_peer_register_ocb_peer(ol_txrx_soc_handle soc, void *cds_ctx,
	uint8_t *mac_addr, uint8_t *peer_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->register_ocb_peer)
		return QDF_STATUS_E_NOSUPPORT;

	return ops->register_ocb_peer(cds_ctx, mac_addr, peer_id);
}
/**
* ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
* @vdev: vdev handle
* @peer_num_delta: peer nums to be adjusted
* cdp_peer_remove_for_vdev() - remove peer instance from virtual interface
* @soc - data path soc handle
* @vdev - virtual interface instance
* @callback - remove done notification callback function pointer
* @callback_context - callback caller context
* @remove_last_peer - removed peer is last peer or not
*
* Return: -1 for failure or total peer nums after adjustment.
*/
int16_t
ol_txrx_update_ibss_add_peer_num_of_vdev(ol_txrx_vdev_handle vdev,
int16_t peer_num_delta);
/**
* ol_txrx_remove_peers_for_vdev() - remove all vdev peers with lock held
* @vdev: vdev handle
* @callback: callback function to remove the peer.
* @callback_context: handle for callback function
* @remove_last_peer: Does it required to last peer.
* remove peer instance from virtual interface
*
* Return: NONE
*/
void
ol_txrx_remove_peers_for_vdev(ol_txrx_vdev_handle vdev,
ol_txrx_vdev_peer_remove_cb callback,
void *callback_context, bool remove_last_peer);
static inline void
cdp_peer_remove_for_vdev(ol_txrx_soc_handle soc,
	void *vdev, ol_txrx_vdev_peer_remove_cb callback,
	void *callback_context, bool remove_last_peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	ops = soc->ops->peer_ops;
	if (ops->remove_peers_for_vdev)
		ops->remove_peers_for_vdev(vdev, callback,
				callback_context, remove_last_peer);
}
/**
* ol_txrx_remove_peers_for_vdev_no_lock() - remove vdev peers with no lock.
* @vdev: vdev handle
* @callback: callback function to remove the peer.
* @callback_context: handle for callback function
* cdp_peer_find_by_addr() - Find peer by peer mac address
* @soc - data path soc handle
* @pdev - data path device instance
* @peer_addr - peer mac address
* @peer_id - local peer id with target mac address
*
* Return: NONE
* Find peer and local peer id by peer mac address
*
* Return: peer instance void pointer
* NULL cannot find target peer
*/
void
ol_txrx_remove_peers_for_vdev_no_lock(ol_txrx_vdev_handle vdev,
ol_txrx_vdev_peer_remove_cb callback,
void *callback_context);
static inline void *
cdp_peer_find_by_addr(ol_txrx_soc_handle soc, void *pdev,
	uint8_t *peer_addr, uint8_t *peer_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->find_peer_by_addr)
		return NULL;

	return ops->find_peer_by_addr(pdev, peer_addr, peer_id);
}
/**
 * cdp_peer_find_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @vdev - virtual interface instance
 * @peer_addr - peer mac address
 * @peer_id - local peer id with target mac address
 *
 * Look the peer up by mac address, restricted to the given vdev.
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
static inline void *
cdp_peer_find_by_addr_and_vdev(ol_txrx_soc_handle soc, void *pdev,
	void *vdev, uint8_t *peer_addr, uint8_t *peer_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->find_peer_by_addr_and_vdev)
		return NULL;

	return ops->find_peer_by_addr_and_vdev(pdev, vdev,
			peer_addr, peer_id);
}
/**
 * cdp_peer_find_by_local_id() - Find peer by local peer id
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @local_peer_id - local peer id to look up
 *
 * Look the peer up by local peer id within the physical device.
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
static inline void *
cdp_peer_find_by_local_id(ol_txrx_soc_handle soc, void *pdev,
	uint8_t local_peer_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->peer_find_by_local_id)
		return NULL;

	return ops->peer_find_by_local_id(pdev, local_peer_id);
}
/**
 * cdp_peer_state_update() - update peer local state
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @peer_addr - peer mac address
 * @state - new peer local state
 *
 * Update the local state machine of the peer identified by @peer_addr.
 *
 * Return: QDF_STATUS_SUCCESS update success
 *         QDF_STATUS_E_NOSUPPORT not support this feature
 */
static inline QDF_STATUS
cdp_peer_state_update(ol_txrx_soc_handle soc, void *pdev,
	uint8_t *peer_addr, enum ol_txrx_peer_state state)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->peer_state_update)
		return QDF_STATUS_E_NOSUPPORT;

	return ops->peer_state_update(pdev, peer_addr, state);
}
/**
 * cdp_peer_state_get() - Get local peer state
 * @soc - data path soc handle
 * @peer - peer instance
 *
 * Read back the peer's local state.
 *
 * Return: peer state, or 0 on invalid instance / unsupported op
 */
static inline int
cdp_peer_state_get(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return 0;
	}

	ops = soc->ops->peer_ops;
	if (!ops->get_peer_state)
		return 0;

	return ops->get_peer_state(peer);
}
/**
 * cdp_peer_get_local_peer_id() - Find local peer id within peer instance
 * @soc - data path soc handle
 * @peer - peer instance
 *
 * Look up the local peer id stored in the peer instance.
 *
 * Return: local peer id
 *         HTT_INVALID_PEER Invalid peer
 */
static inline uint16_t
cdp_peer_get_local_peer_id(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return HTT_INVALID_PEER;
	}

	ops = soc->ops->peer_ops;
	if (!ops->local_peer_id)
		return HTT_INVALID_PEER;

	return ops->local_peer_id(peer);
}
/**
 * cdp_peer_get_vdevid() - Get virtual interface id which peer registered
 * @soc - data path soc handle
 * @peer - peer instance
 * @vdev_id - out: virtual interface id which peer registered
 *
 * Retrieve the id of the virtual interface the peer is registered on.
 *
 * Return: QDF_STATUS_SUCCESS lookup success
 *         QDF_STATUS_E_NOSUPPORT not support this feature
 */
static inline QDF_STATUS
cdp_peer_get_vdevid(ol_txrx_soc_handle soc, void *peer, uint8_t *vdev_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return QDF_STATUS_E_INVAL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->get_vdevid)
		return QDF_STATUS_E_NOSUPPORT;

	return ops->get_vdevid(peer, vdev_id);
}
/**
 * cdp_peer_get_vdev_by_sta_id() - Get vdev instance by local peer id
 * @soc - data path soc handle
 * @sta_id - local peer id
 *
 * Look up the virtual interface instance by local peer id.
 *
 * Return: Virtual interface instance
 *         NULL in case cannot find
 */
static inline void *
cdp_peer_get_vdev_by_sta_id(ol_txrx_soc_handle soc, uint8_t sta_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->get_vdev_by_sta_id)
		return NULL;

	return ops->get_vdev_by_sta_id(sta_id);
}
/**
 * cdp_peer_get_peer_mac_addr() - Get peer mac address
 * @soc - data path soc handle
 * @peer - peer instance
 *
 * Fetch the mac address stored in the peer instance.
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
static inline uint8_t *
cdp_peer_get_peer_mac_addr(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->peer_get_peer_mac_addr)
		return NULL;

	return ops->peer_get_peer_mac_addr(peer);
}
/**
 * cdp_peer_get_vdev() - Get virtual interface instance which peer belongs
 * @soc - data path soc handle
 * @peer - peer instance
 *
 * Look up the virtual interface instance the peer belongs to.
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
static inline void *
cdp_peer_get_vdev(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->get_vdev_for_peer)
		return NULL;

	return ops->get_vdev_for_peer(peer);
}
/**
 * cdp_peer_update_ibss_add_peer_num_of_vdev() - update number of peer
 * @soc - data path soc handle
 * @vdev - virtual interface instance
 * @peer_num_delta - signed adjustment to the peer count
 *
 * Adjust the vdev's IBSS peer count by @peer_num_delta.
 *
 * Return: updated number of peer
 *         0 fail
 */
static inline int16_t
cdp_peer_update_ibss_add_peer_num_of_vdev(ol_txrx_soc_handle soc,
	void *vdev, int16_t peer_num_delta)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return 0;
	}

	ops = soc->ops->peer_ops;
	if (!ops->update_ibss_add_peer_num_of_vdev)
		return 0;

	return ops->update_ibss_add_peer_num_of_vdev(vdev, peer_num_delta);
}
/**
 * cdp_peer_copy_mac_addr_raw() - copy peer mac address
 * @soc - data path soc handle
 * @vdev - virtual interface instance
 * @bss_addr - mac address to be copied
 *
 * Copy @bss_addr into the vdev's raw mac address storage.
 *
 * Return: none
 */
static inline void
cdp_peer_copy_mac_addr_raw(ol_txrx_soc_handle soc,
	void *vdev, uint8_t *bss_addr)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	ops = soc->ops->peer_ops;
	if (ops->copy_mac_addr_raw)
		ops->copy_mac_addr_raw(vdev, bss_addr);
}
/**
 * cdp_peer_add_last_real_peer() - Add peer with last peer marking
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @vdev - virtual interface instance
 * @peer_id - local peer id
 *
 * Add the peer to the vdev, marking it as the last real peer.
 *
 * Return: none
 */
static inline void
cdp_peer_add_last_real_peer(ol_txrx_soc_handle soc,
	void *pdev, void *vdev, uint8_t *peer_id)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	ops = soc->ops->peer_ops;
	if (ops->add_last_real_peer)
		ops->add_last_real_peer(pdev, vdev, peer_id);
}
/**
 * cdp_peer_last_assoc_received() - last assoc received peer
 * @soc - data path soc handle
 * @peer - peer instance pointer
 *
 * Return a pointer to the timestamp of the last assoc received from the
 * peer. NOTE: this should be implemented on legacy data path as well.
 *
 * Return: timestamp pointer, or NULL on invalid instance / unsupported op
 */
static inline qdf_time_t *
cdp_peer_last_assoc_received(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->last_assoc_received)
		return NULL;

	return ops->last_assoc_received(peer);
}
/**
 * cdp_peer_last_disassoc_received() - last disassoc received peer
 * @soc - data path soc handle
 * @peer - peer instance pointer
 *
 * Return a pointer to the timestamp of the last disassoc received from the
 * peer. NOTE: this should be implemented on legacy data path as well.
 *
 * Return: timestamp pointer, or NULL on invalid instance / unsupported op
 */
static inline qdf_time_t *
cdp_peer_last_disassoc_received(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->last_disassoc_received)
		return NULL;

	return ops->last_disassoc_received(peer);
}
/**
 * cdp_peer_last_deauth_received() - last deauth received peer
 * @soc - data path soc handle
 * @peer - peer instance pointer
 *
 * Return a pointer to the timestamp of the last deauth received from the
 * peer. NOTE: this should be implemented on legacy data path as well.
 *
 * Return: timestamp pointer, or NULL on invalid instance / unsupported op
 */
static inline qdf_time_t *
cdp_peer_last_deauth_received(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return NULL;
	}

	ops = soc->ops->peer_ops;
	if (!ops->last_deauth_received)
		return NULL;

	return ops->last_deauth_received(peer);
}
/**
 * cdp_peer_is_vdev_restore_last_peer() - restore last peer
 * @soc - data path soc handle
 * @peer - peer instance pointer
 *
 * Query whether the vdev should restore its last peer.
 *
 * Return: true, restore success
 *         false, restore fail
 */
static inline bool
cdp_peer_is_vdev_restore_last_peer(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return false;
	}

	ops = soc->ops->peer_ops;
	if (!ops->is_vdev_restore_last_peer)
		return false;

	return ops->is_vdev_restore_last_peer(peer);
}
/**
 * cdp_peer_update_last_real_peer() - update last real peer
 * @soc - data path soc handle
 * @pdev - data path device instance
 * @peer - peer instance pointer
 * @peer_id - local peer id
 * @restore_last_peer - restore last peer or not
 *
 * Update the pdev's notion of the last real peer.
 *
 * Return: none
 */
static inline void
cdp_peer_update_last_real_peer(ol_txrx_soc_handle soc, void *pdev,
	void *peer, uint8_t *peer_id, bool restore_last_peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	ops = soc->ops->peer_ops;
	if (ops->update_last_real_peer)
		ops->update_last_real_peer(pdev, peer, peer_id,
				restore_last_peer);
}
/**
 * cdp_peer_detach_force_delete() - Detach and delete a peer's data object
 * @soc - data path soc handle
 * @peer - the peer object to detach
 *
 * Detach a peer and force the peer object to be removed. It is called during
 * roaming scenario when the firmware has already deleted a peer.
 * Peer object is freed immediately to avoid duplicate peers during roam sync
 * indication processing.
 *
 * Return: None
 */
static inline void
cdp_peer_detach_force_delete(ol_txrx_soc_handle soc, void *peer)
{
	struct cdp_peer_ops *ops;

	if (!soc || !soc->ops || !soc->ops->peer_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	ops = soc->ops->peer_ops;
	if (ops->peer_detach_force_delete)
		ops->peer_detach_force_delete(peer);
}
#endif /* _CDP_TXRX_PEER_H_ */

View File

@@ -28,7 +28,8 @@
#define _CDP_TXRX_PMF_H_
/**
* ol_txrx_get_pn_info() - Returns pn info from peer
* cdp_get_pn_info() - Returns pn info from peer
* @soc - data path soc handle
* @peer: handle to peer
* @last_pn_valid: return last_rmf_pn_valid value from peer.
* @last_pn: return last_rmf_pn value from peer.
@@ -36,7 +37,20 @@
*
* Return: NONE
*/
void
ol_txrx_get_pn_info(ol_txrx_peer_handle peer, uint8_t **last_pn_valid,
uint64_t **last_pn, uint32_t **rmf_pn_replays);
static inline void
cdp_get_pn_info(ol_txrx_soc_handle soc, void *peer, uint8_t **last_pn_valid,
	uint64_t **last_pn, uint32_t **rmf_pn_replays)
{
	struct cdp_pmf_ops *ops;

	if (!soc || !soc->ops || !soc->ops->pmf_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	ops = soc->ops->pmf_ops;
	if (ops->get_pn_info)
		ops->get_pn_info(peer, last_pn_valid, last_pn,
				rmf_pn_replays);
}
#endif /* _CDP_TXRX_PMF_H_ */

View File

@@ -31,9 +31,31 @@
*/
#ifndef _CDP_TXRX_STATS_H_
#define _CDP_TXRX_STATS_H_
#include <cdp_txrx_ops.h>
void ol_txrx_display_stats(uint16_t bitmap);
void ol_txrx_clear_stats(uint16_t bitmap);
int ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len);
/**
 * cdp_display_stats() - display data path statistics
 * @soc - data path soc handle
 * @bitmap - bitmap selecting which stats modules to display
 *
 * Return: none
 */
static inline void
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t bitmap)
{
	/* Fix: validate soc/ops before dereference, consistent with
	 * every other cdp wrapper (original crashed on NULL soc).
	 */
	if (!soc || !soc->ops || !soc->ops->mob_stats_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	if (soc->ops->mob_stats_ops->display_stats)
		soc->ops->mob_stats_ops->display_stats(bitmap);
}
/**
 * cdp_clear_stats() - clear data path statistics
 * @soc - data path soc handle
 * @bitmap - bitmap selecting which stats modules to clear
 *
 * Return: none
 */
static inline void
cdp_clear_stats(ol_txrx_soc_handle soc, uint16_t bitmap)
{
	/* Fix: validate soc/ops before dereference, consistent with
	 * every other cdp wrapper (original crashed on NULL soc).
	 */
	if (!soc || !soc->ops || !soc->ops->mob_stats_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	if (soc->ops->mob_stats_ops->clear_stats)
		soc->ops->mob_stats_ops->clear_stats(bitmap);
}
/**
 * cdp_stats() - fill per-vdev stats into a caller buffer
 * @soc - data path soc handle
 * @vdev_id - virtual interface id
 * @buffer - caller-provided output buffer
 * @buf_len - size of @buffer in bytes
 *
 * Return: number of bytes written, 0 on invalid instance or unsupported op
 */
static inline int
cdp_stats(ol_txrx_soc_handle soc, uint8_t vdev_id, char *buffer,
	unsigned buf_len)
{
	/* Fix: validate soc/ops before dereference, consistent with
	 * every other cdp wrapper (original crashed on NULL soc).
	 */
	if (!soc || !soc->ops || !soc->ops->mob_stats_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return 0;
	}

	if (soc->ops->mob_stats_ops->stats)
		return soc->ops->mob_stats_ops->stats(vdev_id, buffer,
				buf_len);

	return 0;
}
#endif /* _CDP_TXRX_STATS_H_ */

View File

@@ -32,45 +32,105 @@
#ifndef _CDP_TXRX_COMPUTE_TX_DELAY_H_
#define _CDP_TXRX_COMPUTE_TX_DELAY_H_
#ifdef QCA_COMPUTE_TX_DELAY
void
ol_tx_delay(ol_txrx_pdev_handle pdev, uint32_t *queue_delay_microsec,
uint32_t *tx_delay_microsec, int category);
void
ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
uint16_t *bin_values, int category);
void
ol_tx_packet_count(ol_txrx_pdev_handle pdev, uint16_t *out_packet_count,
uint16_t *out_packet_loss_count, int category);
void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev,
uint32_t interval);
#else
/**
* cdp_tx_delay() - get tx packet delay
* @soc: data path soc handle
* @pdev: physical device instance
* @queue_delay_microsec: tx packet delay within queue, usec
* @tx_delay_microsec: tx packet delay, usec
* @category: packet catagory
*
* Return: NONE
*/
static inline void
ol_tx_delay(ol_txrx_pdev_handle pdev, uint32_t *queue_delay_microsec,
uint32_t *tx_delay_microsec, int category)
cdp_tx_delay(ol_txrx_soc_handle soc, void *pdev,
uint32_t *queue_delay_microsec, uint32_t *tx_delay_microsec,
int category)
{
if (!soc || !soc->ops || !soc->ops->delay_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->delay_ops->tx_delay)
return soc->ops->delay_ops->tx_delay(pdev,
queue_delay_microsec, tx_delay_microsec, category);
return;
}
/**
* cdp_tx_delay_hist() - get tx packet delay histogram
* @soc: data path soc handle
* @pdev: physical device instance
* @bin_values: bin
* @category: packet catagory
*
* Return: NONE
*/
static inline void
ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
uint16_t *bin_values, int category)
cdp_tx_delay_hist(ol_txrx_soc_handle soc, void *pdev,
uint16_t *bin_values, int category)
{
if (!soc || !soc->ops || !soc->ops->delay_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->delay_ops->tx_delay_hist)
return soc->ops->delay_ops->tx_delay_hist(pdev,
bin_values, category);
return;
}
/**
* cdp_tx_packet_count() - get tx packet count
* @soc: data path soc handle
* @pdev: physical device instance
* @out_packet_loss_count: packet loss count
* @category: packet catagory
*
* Return: NONE
*/
static inline void
ol_tx_packet_count(ol_txrx_pdev_handle pdev, uint16_t *out_packet_count,
uint16_t *out_packet_loss_count, int category)
cdp_tx_packet_count(ol_txrx_soc_handle soc, void *pdev,
uint16_t *out_packet_count, uint16_t *out_packet_loss_count,
int category)
{
if (!soc || !soc->ops || !soc->ops->delay_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->delay_ops->tx_packet_count)
return soc->ops->delay_ops->tx_packet_count(pdev,
out_packet_count, out_packet_loss_count, category);
return;
}
/**
* cdp_tx_set_compute_interval() - set tx packet stat compute interval
* @soc: data path soc handle
* @pdev: physical device instance
* @interval: compute interval
*
* Return: NONE
*/
static inline void
ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
cdp_tx_set_compute_interval(ol_txrx_soc_handle soc, void *pdev,
uint32_t interval)
{
if (!soc || !soc->ops || !soc->ops->delay_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->delay_ops->tx_set_compute_interval)
return soc->ops->delay_ops->tx_set_compute_interval(pdev,
interval);
return;
}
#endif
#endif /* _CDP_TXRX_COMPUTE_TX_DELAY_H_ */

View File

@@ -32,24 +32,53 @@
*/
#ifndef _CDP_TXRX_TX_THROTTLE_H_
#define _CDP_TXRX_TX_THROTTLE_H_
#include <cdp_txrx_ops.h>
#if defined(QCA_SUPPORT_TX_THROTTLE)
void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period,
uint8_t *dutycycle_level);
void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level);
#else
static inline void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev,
int level)
/**
* cdp_throttle_init_period() - init tx throttle period
* @soc: data path soc handle
* @pdev: physical device instance
* @period: throttle period
* @dutycycle_level: duty cycle level
*
* Return: NONE
*/
static inline void
cdp_throttle_init_period(ol_txrx_soc_handle soc, void *pdev, int period,
	uint8_t *dutycycle_level)
{
	if (!soc || !soc->ops || !soc->ops->throttle_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	if (soc->ops->throttle_ops->throttle_init_period)
		return soc->ops->throttle_ops->throttle_init_period(pdev,
			period, dutycycle_level);
	return;
}
static inline void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev,
int period,
uint8_t *dutycycle_level)
/**
 * cdp_throttle_set_level() - set tx throttle level
 * @soc: data path soc handle
 * @pdev: physical device instance
 * @level: throttle level
 *
 * Return: NONE
 */
static inline void
cdp_throttle_set_level(ol_txrx_soc_handle soc, void *pdev, int level)
{
	if (!soc || !soc->ops || !soc->ops->throttle_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			"%s invalid instance", __func__);
		return;
	}

	if (soc->ops->throttle_ops->throttle_set_level)
		return soc->ops->throttle_ops->throttle_set_level(pdev, level);
	return;
}
#endif
#endif /* _CDP_TXRX_TX_THROTTLE_H_ */

View File

@@ -47,6 +47,12 @@ cdp_set_wds_rx_policy(ol_txrx_soc_handle soc,
void *vdev,
u_int32_t val)
{
if (!soc || !soc->ops || !soc->ops->wds_ops) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
"%s invalid instance", __func__);
return;
}
if (soc->ops->wds_ops->txrx_set_wds_rx_policy)
return soc->ops->wds_ops->txrx_set_wds_rx_policy(vdev, val);
return;