qcacld-3.0: Send peer unmap conf based on peer unmap count

Send the peer unmap conf command to FW for a peer_id only after
UNMAP for that peer_id has been received three times if RFS is
enabled, or once if RFS is not enabled.

Change-Id: Idba2c406f7ad7d770fe8326e8a0d6ac7fab10252
CRs-Fixed: 2387480
This commit is contained in:
Alok Kumar
2019-01-24 17:49:25 +05:30
committed by nshrivas
parent 3fe3ca13ea
commit 604b033e71
4 changed files with 63 additions and 74 deletions

View File

@@ -773,6 +773,13 @@ ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
if (ol_cfg_is_flow_steering_enabled(pdev->ctrl_pdev))
pdev->peer_id_unmap_ref_cnt =
TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT;
else
pdev->peer_id_unmap_ref_cnt =
TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT;
ol_txrx_debugfs_init(pdev);
return (struct cdp_pdev *)pdev;
@@ -2193,10 +2200,6 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
peer->peer_ids[i] = HTT_INVALID_PEER;
if (pdev->enable_peer_unmap_conf_support)
for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
qdf_spinlock_create(&peer->peer_info_lock);
qdf_spinlock_create(&peer->bufq_info.bufq_lock);
@@ -2834,45 +2837,6 @@ ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
return 0;
}
/**
 * ol_txrx_send_peer_unmap_conf() - send peer unmap conf cmd to FW
 * @pdev: pdev_handle
 * @peer: peer_handle
 *
 * Collects every pending unmapped peer_id recorded in
 * peer->map_unmap_peer_ids (clearing each consumed slot back to
 * HTT_INVALID_PEER while holding pdev->peer_map_unmap_lock) and, if at
 * least one id was pending and a sync callback is registered, forwards
 * them to the firmware in a single unmap-confirmation command.
 *
 * Return: None
 */
static inline void
ol_txrx_send_peer_unmap_conf(ol_txrx_pdev_handle pdev,
ol_txrx_peer_handle peer)
{
int i;
int peer_cnt = 0;
uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
/* Default to failure so the error path is taken unless the cb succeeds */
QDF_STATUS status = QDF_STATUS_E_FAILURE;
/* Snapshot and clear the pending ids atomically w.r.t. map/unmap events */
qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER &&
peer_cnt < MAX_NUM_PEER_ID_PER_PEER; i++) {
/* Empty slots are marked HTT_INVALID_PEER — skip them */
if (peer->map_unmap_peer_ids[i] == HTT_INVALID_PEER)
continue;
peer_ids[peer_cnt++] = peer->map_unmap_peer_ids[i];
peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
}
qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
/* Send one batched conf cmd covering all collected peer_ids.
 * NOTE(review): peer->peer_unmap_sync_cb is read outside the lock —
 * presumably set once before this path runs; confirm against callers.
 */
if (peer->peer_unmap_sync_cb && peer_cnt) {
ol_txrx_dbg("send unmap conf cmd [%d]", peer_cnt);
status = peer->peer_unmap_sync_cb(
DEBUG_INVALID_VDEV_ID,
peer_cnt, peer_ids);
if (status != QDF_STATUS_SUCCESS)
ol_txrx_err("unable to send unmap conf cmd [%d]",
peer_cnt);
}
}
/**
* ol_txrx_peer_free_tids() - free tids for the peer
* @peer: peer handle
@@ -3089,10 +3053,6 @@ int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
ol_txrx_peer_tx_queue_free(pdev, peer);
/* send peer unmap conf cmd to fw for unmapped peer_ids */
if (pdev->enable_peer_unmap_conf_support)
ol_txrx_send_peer_unmap_conf(pdev, peer);
/* Remove mappings from peer_id to peer object */
ol_txrx_peer_clear_map_peer(pdev, peer);
@@ -3341,11 +3301,14 @@ static void ol_txrx_peer_detach_sync(void *ppeer,
uint32_t bitmap)
{
ol_txrx_peer_handle peer = ppeer;
ol_txrx_pdev_handle pdev = peer->vdev->pdev;
ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
peer, qdf_atomic_read(&peer->ref_cnt));
peer->peer_unmap_sync_cb = peer_unmap_sync;
if (!pdev->peer_unmap_sync_cb)
pdev->peer_unmap_sync_cb = peer_unmap_sync;
ol_txrx_peer_detach(peer, bitmap);
}

View File

@@ -61,6 +61,9 @@ enum ol_txrx_fc_limit_id {
TXRX_FC_MAX
};
#define TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT 3
#define TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT 1
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
u8 *peer_addr,
u8 *peer_id,

View File

@@ -471,6 +471,45 @@ void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
ol_txrx_peer_find_hash_detach(pdev);
}
/**
 * ol_txrx_peer_unmap_conf_handler() - send peer unmap conf cmd to FW
 * @pdev: pdev_handle
 * @peer_id: peer_id
 *
 * Counts UNMAP events for @peer_id and, once the count reaches
 * pdev->peer_id_unmap_ref_cnt (3 with RFS enabled, 1 otherwise, per
 * the commit description), sends a single-entry unmap-confirmation
 * command to the firmware via pdev->peer_unmap_sync_cb and resets the
 * per-peer_id counter. Assumes pdev->peer_unmap_sync_cb is non-NULL —
 * the caller in ol_rx_peer_unmap_handler() gates on it.
 *
 * Return: None
 */
static inline void
ol_txrx_peer_unmap_conf_handler(ol_txrx_pdev_handle pdev,
uint16_t peer_id)
{
/* Default to failure so the error path is taken unless the cb succeeds */
QDF_STATUS status = QDF_STATUS_E_FAILURE;
/* HTT_INVALID_PEER would index out of the peer_id_to_obj_map */
if (peer_id == HTT_INVALID_PEER) {
ol_txrx_err(
"invalid peer ID %d\n", peer_id);
return;
}
/* One more UNMAP seen for this peer_id */
qdf_atomic_inc(&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);
/* Fire the conf cmd only when the configured unmap count is reached */
if (qdf_atomic_read(
&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt) ==
pdev->peer_id_unmap_ref_cnt) {
ol_txrx_dbg("send unmap conf cmd: peer_id[%d] unmap_cnt[%d]",
peer_id, pdev->peer_id_unmap_ref_cnt);
status = pdev->peer_unmap_sync_cb(
DEBUG_INVALID_VDEV_ID,
1, &peer_id);
if (status != QDF_STATUS_SUCCESS)
ol_txrx_err("unable to send unmap conf cmd [%d]",
peer_id);
/* Reset the counter (back to 0) for the next map/unmap cycle */
qdf_atomic_init(
&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);
}
}
/*=== function definitions for message handling =============================*/
#if defined(CONFIG_HL_SUPPORT)
@@ -585,6 +624,11 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
/* send peer unmap conf cmd to fw for unmapped peer_ids */
if (pdev->enable_peer_unmap_conf_support &&
pdev->peer_unmap_sync_cb)
ol_txrx_peer_unmap_conf_handler(pdev, peer_id);
if (qdf_atomic_read(
&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
/* This peer_id belongs to a peer already deleted */
@@ -618,35 +662,13 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
if (qdf_atomic_dec_and_test
(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
bool peer_id_matched = false;
bool added = false;
pdev->peer_id_to_obj_map[peer_id].peer = NULL;
for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
if (peer->peer_ids[i] == peer_id) {
peer->peer_ids[i] = HTT_INVALID_PEER;
peer_id_matched = true;
break;
}
}
if (pdev->enable_peer_unmap_conf_support && peer_id_matched) {
for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
if (peer->map_unmap_peer_ids[i] == peer_id) {
added = true;
break;
}
}
if (!added) {
for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
if (peer->map_unmap_peer_ids[i] ==
HTT_INVALID_PEER) {
peer->map_unmap_peer_ids[i] =
peer_id;
break;
}
}
}
}
}
ref_cnt = qdf_atomic_read

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -542,6 +542,7 @@ struct ol_txrx_peer_id_map {
struct ol_txrx_peer_t *peer;
qdf_atomic_t peer_id_ref_cnt;
qdf_atomic_t del_peer_id_ref_cnt;
qdf_atomic_t peer_id_unmap_cnt;
};
/**
@@ -815,6 +816,8 @@ struct ol_txrx_pdev_t {
qdf_spinlock_t peer_map_unmap_lock;
ol_txrx_peer_unmap_sync_cb peer_unmap_sync_cb;
struct {
struct {
struct {
@@ -1058,6 +1061,7 @@ struct ol_txrx_pdev_t {
struct ol_txrx_ipa_resources ipa_resource;
#endif /* IPA_UC_OFFLOAD */
bool new_htt_msg_format;
uint8_t peer_id_unmap_ref_cnt;
bool enable_peer_unmap_conf_support;
};
@@ -1288,13 +1292,10 @@ struct ol_txrx_peer_t {
/* Wrapper around the cached_bufq list */
struct ol_txrx_cached_bufq_t bufq_info;
ol_txrx_peer_unmap_sync_cb peer_unmap_sync_cb;
ol_tx_filter_func tx_filter;
/* peer ID(s) for this peer */
uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
uint16_t map_unmap_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
uint16_t local_id;
#endif