@@ -76,6 +76,7 @@
 #include "epping_main.h"
 #include <a_types.h>
 #include <cdp_txrx_handle.h>
+#include <cdp_txrx_cmn_reg.h>
 #include "wlan_qct_sys.h"
 
 #include <htt_internal.h>
@@ -107,8 +108,8 @@ enum dpt_set_param_debugfs {
 QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *pdev,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);
-static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *vdev,
-					bool val);
+static void ol_vdev_rx_set_intrabss_fwd(struct cdp_soc_t *soc_hdl,
+					uint8_t vdev_id, bool val);
 int ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
 extern void
 ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
@@ -673,7 +674,7 @@ static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
 
 /**
  * ol_txrx_pdev_attach() - allocate txrx pdev
- * @soc: datapath soc handle
+ * @soc_hdl: datapath soc handle
  * @htc_pdev: HTC pdev
  * @osdev: os dev
  * @pdev_id: pdev identifier for pdev attach
@@ -898,26 +899,28 @@ static inline void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
 
 /**
  * ol_txrx_pdev_post_attach() - attach txrx pdev
- * @pdev: txrx pdev
+ * @soc_hdl: datapath soc handle
+ * @pdev_id: physical device instance id
  *
  * Return: 0 for success
  */
 int
-ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
+ol_txrx_pdev_post_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
 {
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
 	uint16_t i;
 	uint16_t fail_idx = 0;
 	int ret = 0;
 	uint16_t desc_pool_size;
 	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
 
 	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
 	union ol_tx_desc_list_elem_t *c_element;
 	unsigned int sig_bit;
 	uint16_t desc_per_page;
 
-	if (!osc) {
+	if (!osc || !pdev) {
 		ret = -EINVAL;
 		goto ol_attach_fail;
 	}
@@ -1382,7 +1385,8 @@ ol_attach_fail:
 /**
  * ol_txrx_pdev_attach_target() - send target configuration
  *
- * @pdev - the physical device being initialized
+ * @soc_hdl - data path soc handle
+ * @pdev_id - device instance id
  *
  * The majority of the data SW setup are done by the pdev_attach
  * functions, but this function completes the data SW setup by
@@ -1390,9 +1394,14 @@ ol_attach_fail:
  *
  * Return: 0 - success 1 - failure
  */
-static int ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
+static int ol_txrx_pdev_attach_target(struct cdp_soc_t *soc_hdl,
+				       uint8_t pdev_id)
 {
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
+
+	if (!pdev)
+		return QDF_STATUS_E_FAULT;
 
 	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0:1;
 }
@@ -1443,7 +1452,8 @@ static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
 
 /**
  * ol_txrx_pdev_pre_detach() - detach the data SW state
- * @pdev - the data physical device object being removed
+ * @soc_hdl - datapath soc handle
+ * @pdev_id - the data physical device id being removed
  * @force - delete the pdev (and its vdevs and peers) even if
  *	there are outstanding references by the target to the vdevs
  *	and peers within the pdev
@@ -1451,11 +1461,14 @@ static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
  * This function is used when the WLAN driver is being removed to
  * detach the host data component within the driver.
  *
- * Return: None
+ * Return: none
  */
-static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
+static void ol_txrx_pdev_pre_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
+				    int force)
 {
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
+								     pdev_id);
 
 	/* preconditions */
 	TXRX_ASSERT2(pdev);
@@ -1547,11 +1560,14 @@ static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
 #ifdef QCA_COMPUTE_TX_DELAY
 	qdf_spinlock_destroy(&pdev->tx_delay.mutex);
 #endif
+
+	return;
 }
 
 /**
  * ol_txrx_pdev_detach() - delete the data SW state
- * @ppdev - the data physical device object being removed
+ * @soc_hdl - data path soc handle
+ * @pdev_id - device instance id
  * @force - delete the pdev (and its vdevs and peers) even if
  *	there are outstanding references by the target to the vdevs
  *	and peers within the pdev
@@ -1561,24 +1577,26 @@ static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
  * All virtual devices within the physical device need to be deleted
  * (ol_txrx_vdev_detach) before the physical device itself is deleted.
  *
- * Return: None
+ * Return: Success or Failure
  */
-static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
+static QDF_STATUS ol_txrx_pdev_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
+				      int force)
 {
-	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
+								     pdev_id);
 	struct ol_txrx_stats_req_internal *req, *temp_req;
 	int i = 0;
 
 	if (!soc) {
 		ol_txrx_err("soc is NULL");
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	/*checking to ensure txrx pdev structure is not NULL */
 	if (!pdev) {
 		ol_txrx_err("pdev is NULL");
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
@@ -1625,6 +1643,8 @@ static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
 
 	soc->pdev_list[pdev->id] = NULL;
 	qdf_mem_free(pdev);
+
+	return QDF_STATUS_SUCCESS;
 }
 
 #if defined(QCA_HL_NETDEV_FLOW_CONTROL)
@@ -1657,7 +1677,8 @@ ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
  * ol_txrx_vdev_attach - Allocate and initialize the data object
  *	for a new virtual device.
  *
- * @data_pdev - the physical device the virtual device belongs to
+ * @soc_hdl - data path soc handle
+ * @pdev_id - physical device instance id
  * @vdev_mac_addr - the MAC address of the virtual device
  * @vdev_id - the ID used to identify the virtual device to the target
  * @op_mode - whether this virtual device is operating as an AP,
@@ -1667,13 +1688,15 @@ ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
  * Return: success: handle to new data vdev object, failure: NULL
  */
 static struct cdp_vdev *
-ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
+ol_txrx_vdev_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		    uint8_t *vdev_mac_addr,
		    uint8_t vdev_id, enum wlan_op_mode op_mode,
		    enum wlan_op_subtype subtype)
 {
-	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
+								     pdev_id);
+
 	struct ol_txrx_vdev_t *vdev;
 	QDF_STATUS qdf_status;
 
@@ -1711,8 +1734,7 @@ ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
 	TAILQ_INIT(&vdev->peer_list);
 	vdev->last_real_peer = NULL;
 
-	ol_txrx_hl_tdls_flag_reset(ol_txrx_soc_t_to_cdp_soc_t(soc),
-				   vdev_id, false);
+	ol_txrx_hl_tdls_flag_reset(soc_hdl, vdev_id, false);
 
 #ifdef QCA_IBSS_SUPPORT
 	vdev->ibss_peer_num = 0;
@@ -1787,7 +1809,8 @@ ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
  * ol_txrx_vdev_register - Link a vdev's data object with the
  *	matching OS shim vdev object.
  *
- * @pvdev: the virtual device's data object
+ * @soc_hdl: datapath soc handle
+ * @vdev_id: the virtual device's id
  * @osif_vdev: the virtual device's OS shim object
  * @txrx_ops: (pointers to)functions used for tx and rx data xfer
  *
@@ -1802,15 +1825,19 @@ ol_txrx_vdev_attach(struct cdp_pdev *ppdev,
  *	vdev objects, so the data SW can use the OS shim vdev handle
  *	when passing rx data received by a vdev up to the OS shim.
  */
-static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
-				  struct ol_txrx_ops *txrx_ops)
+static QDF_STATUS ol_txrx_vdev_register(struct cdp_soc_t *soc_hdl,
+					uint8_t vdev_id,
+					ol_osif_vdev_handle osif_vdev,
+					struct ol_txrx_ops *txrx_ops)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
 
 	if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
 		qdf_print("vdev/txrx_ops is NULL!");
 		qdf_assert(0);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	vdev->osif_dev = osif_vdev;
@@ -1818,6 +1845,8 @@ static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
 	vdev->stats_rx = txrx_ops->rx.stats_rx;
 	vdev->tx_comp = txrx_ops->tx.tx_comp;
 	txrx_ops->tx.tx = ol_tx_data;
+
+	return QDF_STATUS_SUCCESS;
 }
 
 void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
@@ -1903,7 +1932,8 @@ ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
 /**
  * ol_txrx_vdev_detach - Deallocate the specified data virtual
  *	device object.
- * @data_vdev: data object for the virtual device in question
+ * @soc_hdl - data path soc handle
+ * @vdev_id: vdev id
  * @callback: function to call (if non-NULL) once the vdev has
  *	been wholly deleted
  * @callback_context: context to provide in the callback
@@ -1923,13 +1953,18 @@ ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
  *	vdev_detach call, or if it's deferred until all in-progress peer
  *	deletions have completed.
  */
-static void
-ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
+static QDF_STATUS
+ol_txrx_vdev_detach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		    ol_txrx_vdev_delete_cb callback, void *context)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
 	struct ol_txrx_pdev_t *pdev;
 
+	if (qdf_unlikely(!vdev))
+		return QDF_STATUS_E_FAILURE;
+
 	/* preconditions */
 	TXRX_ASSERT2(vdev);
 	pdev = vdev->pdev;
@@ -1988,7 +2023,7 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
		vdev->delete.callback = callback;
		vdev->delete.context = context;
		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
-		return;
+		return QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	qdf_event_destroy(&vdev->wait_delete_comp);
@@ -2020,6 +2055,8 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
	qdf_mem_free(vdev);
	if (callback)
		callback(context);
+
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -2128,8 +2165,8 @@ static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
 /**
  * ol_txrx_peer_attach - Allocate and set up references for a
  *	data peer object.
- * @pvdev - data virtual device object that will directly
- *	own the data_peer object
+ * @soc_hdl - data path soc handle
+ * @vdev_id - virtual device instance id
  * @peer_mac_addr - MAC address of the new peer
  *
 * When an association with a peer starts, the host's control SW
@@ -2143,13 +2180,15 @@ static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
  * so a reference within the control peer object can be set to the
  * data peer object.
  *
- * Return: handle to new data peer object, or NULL if the attach
- *	fails
+ * Return: handle to new data peer object, or NULL if the attach fails
  */
 static void *
-ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
+ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+		    uint8_t *peer_mac_addr)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_peer_t *temp_peer;
	uint8_t i;
@@ -2237,7 +2276,7 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
 
	peer = qdf_mem_malloc(sizeof(*peer));
	if (!peer)
-		return NULL;	/* failure */
+		return NULL;
 
	/* store provided params */
	peer->vdev = vdev;
@@ -2323,7 +2362,7 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
	 * No Tx in monitor mode, otherwise results in target assert.
	 * Setting disable_intrabss_fwd to true
	 */
-		ol_vdev_rx_set_intrabss_fwd((struct cdp_vdev *)vdev, true);
+		ol_vdev_rx_set_intrabss_fwd(soc_hdl, vdev_id, true);
	}
 
	ol_txrx_local_peer_id_alloc(pdev, peer);
@@ -2475,14 +2514,17 @@ static struct cdp_vdev *ol_txrx_get_vdev_for_peer(void *ppeer)
 
 /**
  * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
- * @vdev: vdev handle
+ * @soc_hdl: datapath soc handle
+ * @vdev_id: virtual interface id
  *
  * Return: vdev mac address
  */
 static uint8_t *
-ol_txrx_get_vdev_mac_addr(struct cdp_vdev *pvdev)
+ol_txrx_get_vdev_mac_addr(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
 
	if (!vdev)
		return NULL;
@@ -2520,14 +2562,20 @@ ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
 
 /**
  * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
- * @vdev: vdev handle
+ * @soc_hdl: datapath soc handle
+ * @vdev_id: virtual interface id
  *
  * Return: Handle to control pdev
  */
 static struct cdp_cfg *
-ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_vdev *pvdev)
+ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
+
+	if (!vdev)
+		return NULL;
 
	return vdev->pdev->ctrl_pdev;
 }
@@ -3274,20 +3322,36 @@ void peer_unmap_timer_handler(void *data)
 
 /**
  * ol_txrx_peer_detach() - Delete a peer's data object.
- * @peer - the object to detach
+ *
+ * @soc_hdl: datapath soc handle
+ * @vdev_id: virtual interface id
+ * @peer_mac: peer MAC address
  * @bitmap - bitmap indicating special handling of request.
- *
  * When the host's control SW disassociates a peer, it calls
  * this function to detach and delete the peer. The reference
  * stored in the control peer object to the data peer
  * object (set up by a call to ol_peer_store()) is provided.
  *
- * Return: None
+ * Return: SUCCESS or Failure
  */
-static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
+static QDF_STATUS ol_txrx_peer_detach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+				      uint8_t *peer_mac, uint32_t bitmap)
 {
-	ol_txrx_peer_handle peer = ppeer;
-	struct ol_txrx_vdev_t *vdev = peer->vdev;
+	ol_txrx_peer_handle peer;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
+
+	if (!vdev)
+		return QDF_STATUS_E_FAILURE;
+
+	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)vdev->pdev,
+					 peer_mac);
+	if (!peer)
+		return QDF_STATUS_E_FAILURE;
+
+	ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
+			  peer, qdf_atomic_read(&peer->ref_cnt));
 
	/* redirect peer's rx delivery function to point to a discard func */
	peer->rx_opt_proc = ol_rx_discard;
@@ -3342,6 +3406,8 @@ static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
	 * reference, added by the PEER_MAP message.
	 */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
+
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -3357,6 +3423,7 @@ static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
  */
 static void ol_txrx_peer_detach_force_delete(void *ppeer)
 {
+	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_peer_handle peer = ppeer;
	ol_txrx_pdev_handle pdev = peer->vdev->pdev;
 
@@ -3365,7 +3432,9 @@ static void ol_txrx_peer_detach_force_delete(void *ppeer)
 
	/* Clear the peer_id_to_obj map entries */
	ol_txrx_peer_remove_obj_map_entries(pdev, peer);
-	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
+	ol_txrx_peer_detach((struct cdp_soc_t *)soc, peer->vdev->vdev_id,
+			    peer->mac_addr.raw,
+			    1 << CDP_PEER_DELETE_NO_SPECIAL);
 }
 
 /**
@@ -3376,34 +3445,45 @@ static void ol_txrx_peer_detach_force_delete(void *ppeer)
  *
  * Return: None
  */
-static void ol_txrx_peer_detach_sync(void *ppeer,
+static void ol_txrx_peer_detach_sync(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+				     uint8_t *peer_mac,
				     ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
				     uint32_t bitmap)
 {
-	ol_txrx_peer_handle peer = ppeer;
-	ol_txrx_pdev_handle pdev = peer->vdev->pdev;
+	struct ol_txrx_pdev_t *pdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
 
-	ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
-			  peer, qdf_atomic_read(&peer->ref_cnt));
+	if (!vdev || !vdev->pdev)
+		return;
 
+	pdev = vdev->pdev;
	if (!pdev->peer_unmap_sync_cb)
		pdev->peer_unmap_sync_cb = peer_unmap_sync;
 
-	ol_txrx_peer_detach(peer, bitmap);
+	ol_txrx_peer_detach(soc_hdl, vdev_id, peer_mac, bitmap);
 }
 
 /**
  * ol_txrx_peer_unmap_sync_cb_set() - set peer unmap sync callback
- * @ppdev - TXRX pdev context
+ * @soc_hdl - datapath soc handle
+ * @pdev_id - physical device instance id
  * @peer_unmap_sync - peer unmap sync callback
  *
  * Return: None
  */
 static void ol_txrx_peer_unmap_sync_cb_set(
-				struct cdp_pdev *ppdev,
+				struct cdp_soc_t *soc_hdl,
+				uint8_t pdev_id,
				ol_txrx_peer_unmap_sync_cb peer_unmap_sync)
 {
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
+								     pdev_id);
+
+	if (!pdev)
+		return;
 
	if (!pdev->peer_unmap_sync_cb)
		pdev->peer_unmap_sync_cb = peer_unmap_sync;
@@ -3750,21 +3830,39 @@ struct ol_txrx_stats_req_internal
	return req;
 }
 
+/**
+ * ol_txrx_fw_stats_get() - Get fw stats
+ *
+ * @soc_hdl: datapath soc handle
+ * @vdev_id: virtual interface id
+ * @req: specifications of stats request
+ * @per_vdev: bool input whether stats requested per vdev or not
+ * @response_expected: bool input whether expecting response or not
+ *
+ * Return: success or failure
+ */
 static A_STATUS
-ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
-		     bool per_vdev, bool response_expected)
+ol_txrx_fw_stats_get(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+		     struct ol_txrx_stats_req *req, bool per_vdev,
+		     bool response_expected)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
+	struct ol_txrx_pdev_t *pdev;
	uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
	struct ol_txrx_stats_req_internal *non_volatile_req;
	struct ol_txrx_fw_stats_desc_t *desc = NULL;
	struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
 
+	if (!vdev)
+		return A_EINVAL;
+
+	pdev = vdev->pdev;
	if (!pdev ||
	    req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
	    req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
-		return A_ERROR;
+		return A_EINVAL;
	}
 
	/*
@@ -3783,7 +3881,7 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
	desc = ol_txrx_fw_stats_desc_alloc(pdev);
	if (!desc) {
		qdf_mem_free(non_volatile_req);
-		return A_ERROR;
+		return A_NO_MEMORY;
	}
 
	/* use the desc id as the cookie */
@@ -4423,9 +4521,21 @@ ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
 }
 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
 
-static void ol_vdev_rx_set_intrabss_fwd(struct cdp_vdev *pvdev, bool val)
+/**
+ * ol_vdev_rx_set_intrabss_fwd() - enable/disable intra bss forwarding
+ *
+ * @soc_hdl: datapath soc handle
+ * @vdev_id: virtual interface id
+ * @val: enable or disable
+ *
+ * Return: void
+ */
+static void ol_vdev_rx_set_intrabss_fwd(struct cdp_soc_t *soc_hdl,
+					uint8_t vdev_id, bool val)
 {
-	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
+								     vdev_id);
 
	if (!vdev)
		return;
@@ -4480,18 +4590,22 @@ static uint32_t ol_txrx_get_tx_ack_stats(struct cdp_soc_t *soc_hdl,
 
 /**
  * ol_txrx_display_stats() - Display OL TXRX display stats
+ * @soc_hdl: Datapath soc handle
  * @value: Module id for which stats needs to be displayed
+ * @verb_level: verbose level of stats to be displayed
  *
  * Return: status
  */
 static QDF_STATUS
-ol_txrx_display_stats(void *soc, uint16_t value,
+ol_txrx_display_stats(struct cdp_soc_t *soc_hdl, uint16_t value,
		      enum qdf_stats_verbosity_level verb_level)
 {
-	ol_txrx_pdev_handle pdev;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(
+							soc,
+							OL_TXRX_PDEV_ID);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
 
-	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: pdev is NULL", __func__);
@@ -4509,7 +4623,7 @@ ol_txrx_display_stats(void *soc, uint16_t value,
		if (verb_level == QDF_STATS_VERBOSITY_LEVEL_LOW)
			ol_tx_dump_flow_pool_info_compact((void *)pdev);
		else
-			ol_tx_dump_flow_pool_info(soc);
+			ol_tx_dump_flow_pool_info(soc_hdl);
		break;
	case CDP_TXRX_DESC_STATS:
		qdf_nbuf_tx_desc_count_display();
@@ -5353,19 +5467,22 @@ struct ol_txrx_vdev_t *ol_txrx_get_vdev_from_soc_vdev_id(
 
 /**
  * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
- * @ppdev: the physical device the virtual device belongs to
+ * @soc_hdl: datapath soc handle
+ * @pdev_id: the physical device id the virtual device belongs to
  *
- * Return: vdev handle
- *	   NULL if not found.
+ * Return: vdev id
+ *	   error if not found.
  */
-struct cdp_vdev *ol_txrx_get_mon_vdev_from_pdev(struct cdp_pdev *ppdev)
+uint8_t ol_txrx_get_mon_vdev_from_pdev(struct cdp_soc_t *soc_hdl,
+				       uint8_t pdev_id)
 {
-	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+	struct ol_txrx_soc_t *soc = (struct ol_txrx_soc_t *)soc_hdl;
+	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
 
	if (qdf_unlikely(!pdev))
-		return NULL;
+		return -EINVAL;
 
-	return (struct cdp_vdev *)pdev->monitor_vdev;
+	return pdev->monitor_vdev->vdev_id;
 }
 
 /**
@@ -5422,9 +5539,9 @@ static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
  *
  * MCL legacy OL do nothing here
  *
- * Return: noe
+ * Return: none
  */
-static void ol_txrx_soc_detach(void *soc)
+static void ol_txrx_soc_detach(struct cdp_soc_t *soc)
 {
	qdf_mem_free(soc);
 }
@@ -5484,20 +5601,6 @@ static void ol_txrx_wrapper_flush_rx_frames(struct cdp_soc_t *soc_hdl,
	ol_txrx_flush_rx_frames(peer, drop);
 }
 
-/**
- * ol_txrx_wrapper_get_vdev_from_vdev_id() - get vdev instance from vdev id
- * @ppdev: pdev handle
- * @vdev_id: interface id
- *
- * Return: virtual interface instance
- */
-static
-struct cdp_vdev *ol_txrx_wrapper_get_vdev_from_vdev_id(struct cdp_pdev *ppdev,
-						       uint8_t vdev_id)
-{
-	return ol_txrx_get_vdev_from_vdev_id(vdev_id);
-}
-
 /**
  * ol_txrx_wrapper_register_peer() - register peer
  * @pdev: pdev handle
@@ -5609,15 +5712,18 @@ ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
 
 /**
  * ol_txrx_get_cfg() - get ini/cgf values in legacy dp
- * @soc: soc context
+ * @soc_hdl: soc context
  * @cfg_param: cfg parameters
  *
  * Return: none
  */
-static uint32_t ol_txrx_get_cfg(void *soc, enum cdp_dp_cfg cfg)
+static uint32_t ol_txrx_get_cfg(struct cdp_soc_t *soc_hdl, enum cdp_dp_cfg cfg)
 {
	struct txrx_pdev_cfg_t *cfg_ctx;
-	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(
+							soc,
+							OL_TXRX_PDEV_ID);
	uint32_t value = 0;
 
	if (!pdev) {
@@ -5770,7 +5876,6 @@ static struct cdp_cmn_ops ol_ops_cmn = {
	.txrx_vdev_register = ol_txrx_vdev_register,
	.txrx_soc_detach = ol_txrx_soc_detach,
	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
-	.txrx_get_vdev_from_vdev_id = ol_txrx_wrapper_get_vdev_from_vdev_id,
	.txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
	.txrx_get_mon_vdev_from_pdev = ol_txrx_get_mon_vdev_from_pdev,
	.txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,