qcacld-3.0: Send peer unmap confirmation cmd to FW

Send WMI_PEER_UNMAP_CONF_CMDID to FW after receiving all
UNMAPs for the peer. This ensures synchronization between
MAP and UNMAP, so the same peer_id is not reused for a
different MAC address until all UNMAPs are processed.

Change-Id: I2c7793b527d5dbab0f9571185e6c16e0c6b7fcc3
CRs-Fixed: 2357245
Author: Alok Kumar
Date:   2018-11-28 17:16:03 +05:30
Committed by: Nitesh Shrivastav
parent a6df1a3b7b
commit e197744efb
4 changed files with 142 additions and 3 deletions
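
For context, the datapath side of this change only collects the unmapped
peer_ids and invokes whatever peer_unmap_sync_cb was registered through the
new txrx_peer_delete_sync op; building and sending the actual
WMI_PEER_UNMAP_CONF_CMDID happens in the control path, outside this diff.
Below is a minimal caller-side sketch of that contract, assuming a cdp
dispatch wrapper named cdp_peer_delete_sync(); wma_peer_unmap_conf_cb() and
wma_send_peer_unmap_conf() are hypothetical stand-in names.

    /* Caller-side sketch (not part of this diff). The callback signature
     * follows the ol_txrx_peer_unmap_sync_cb usage in the diff below:
     * (vdev_id, peer_id_cnt, peer_id_list) -> QDF_STATUS.
     */
    static QDF_STATUS wma_peer_unmap_conf_cb(uint8_t vdev_id,
                                             uint32_t peer_id_cnt,
                                             uint16_t *peer_id_list)
    {
        /* Build WMI_PEER_UNMAP_CONF_CMDID carrying peer_id_list and post
         * it to FW; wma_send_peer_unmap_conf() is a placeholder name.
         */
        return wma_send_peer_unmap_conf(vdev_id, peer_id_cnt, peer_id_list);
    }

    static void wma_remove_peer_example(struct cdp_soc_t *soc, void *peer,
                                        uint32_t bitmap)
    {
        /* Stores the callback on the peer before detaching, so that
         * ol_txrx_peer_release_ref() can confirm the peer_ids once all
         * UNMAP events for this peer have been processed.
         */
        cdp_peer_delete_sync(soc, peer, wma_peer_unmap_conf_cb, bitmap);
    }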

--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c

@@ -2063,6 +2063,7 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
 	struct ol_txrx_pdev_t *pdev;
 	bool cmp_wait_mac = false;
 	uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
+	u8 check_valid = 0;
 
 	/* preconditions */
 	TXRX_ASSERT2(vdev);
@@ -2071,6 +2072,9 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
 	pdev = vdev->pdev;
 	TXRX_ASSERT2(pdev);
 
+	if (pdev->enable_peer_unmap_conf_support)
+		check_valid = 1;
+
 	if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
 			QDF_MAC_ADDR_SIZE))
 		cmp_wait_mac = true;
@@ -2079,7 +2083,8 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
 	/* check for duplicate existing peer */
 	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
 		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
-			(union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
+			(union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
+		    (check_valid == 0 || temp_peer->valid)) {
 			ol_txrx_info_high(
 				"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
 				vdev->vdev_id,
@@ -2098,7 +2103,9 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
 		}
 		if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
 					&temp_peer->mac_addr,
-					&vdev->last_peer_mac_addr)) {
+					&vdev->last_peer_mac_addr) &&
+		    (check_valid == 0 ||
+		     temp_peer->valid)) {
 			ol_txrx_info_high(
 				"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) old peer exists.\n",
 				vdev->vdev_id,
@@ -2172,6 +2179,10 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr,
 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
 		peer->peer_ids[i] = HTT_INVALID_PEER;
 
+	if (pdev->enable_peer_unmap_conf_support)
+		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
+			peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
+
 	qdf_spinlock_create(&peer->peer_info_lock);
 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
@@ -2809,6 +2820,45 @@ ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
 	return 0;
 }
 
+/**
+ * ol_txrx_send_peer_unmap_conf() - send peer unmap conf cmd to FW
+ * @pdev: pdev_handle
+ * @peer: peer_handle
+ *
+ * Return: None
+ */
+static inline void
+ol_txrx_send_peer_unmap_conf(ol_txrx_pdev_handle pdev,
+			     ol_txrx_peer_handle peer)
+{
+	int i;
+	int peer_cnt = 0;
+	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+
+	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
+
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER &&
+	     peer_cnt < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		if (peer->map_unmap_peer_ids[i] == HTT_INVALID_PEER)
+			continue;
+		peer_ids[peer_cnt++] = peer->map_unmap_peer_ids[i];
+		peer->map_unmap_peer_ids[i] = HTT_INVALID_PEER;
+	}
+
+	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
+
+	if (peer->peer_unmap_sync_cb && peer_cnt) {
+		ol_txrx_dbg("send unmap conf cmd [%d]", peer_cnt);
+		status = peer->peer_unmap_sync_cb(DEBUG_INVALID_VDEV_ID,
+						  peer_cnt, peer_ids);
+		if (status != QDF_STATUS_SUCCESS)
+			ol_txrx_err("unable to send unmap conf cmd [%d]",
+				    peer_cnt);
+	}
+}
+
 /**
  * ol_txrx_peer_free_tids() - free tids for the peer
  * @peer: peer handle
@@ -3025,6 +3075,10 @@ int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
 	ol_txrx_peer_tx_queue_free(pdev, peer);
 
+	/* send peer unmap conf cmd to fw for unmapped peer_ids */
+	if (pdev->enable_peer_unmap_conf_support)
+		ol_txrx_send_peer_unmap_conf(pdev, peer);
+
 	/* Remove mappings from peer_id to peer object */
 	ol_txrx_peer_clear_map_peer(pdev, peer);
@@ -3252,6 +3306,28 @@ static void ol_txrx_peer_detach_force_delete(void *ppeer)
 	ol_txrx_peer_detach(peer, 1 << CDP_PEER_DELETE_NO_SPECIAL);
 }
 
+/**
+ * ol_txrx_peer_detach_sync() - peer detach sync callback
+ * @ppeer: the peer object
+ * @peer_unmap_sync: peer unmap sync callback
+ * @bitmap: bitmap indicating special handling of request
+ *
+ * Return: None
+ */
+static void ol_txrx_peer_detach_sync(void *ppeer,
+				     ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
+				     uint32_t bitmap)
+{
+	ol_txrx_peer_handle peer = ppeer;
+
+	ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d", __func__,
+			  peer, qdf_atomic_read(&peer->ref_cnt));
+
+	peer->peer_unmap_sync_cb = peer_unmap_sync;
+	ol_txrx_peer_detach(peer, bitmap);
+}
+
 /**
  * ol_txrx_dump_tx_desc() - dump tx desc total and free count
  * @txrx_pdev: Pointer to txrx pdev
@@ -5393,6 +5469,7 @@ static struct cdp_cmn_ops ol_ops_cmn = {
 	.txrx_peer_setup = NULL,
 	.txrx_peer_teardown = NULL,
 	.txrx_peer_delete = ol_txrx_peer_detach,
+	.txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
 	.txrx_vdev_register = ol_txrx_vdev_register,
 	.txrx_soc_detach = ol_txrx_soc_detach,
 	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
@@ -5542,6 +5619,8 @@ static struct cdp_cfg_ops ol_ops_cfg = {
 	.set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
 	.set_new_htt_msg_format =
 			ol_txrx_set_new_htt_msg_format,
+	.set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
+	.get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
 };
 
 static struct cdp_peer_ops ol_ops_peer = {
@@ -5681,3 +5760,24 @@ void ol_txrx_set_new_htt_msg_format(uint8_t val)
 	pdev->new_htt_msg_format = val;
 }
 
+bool ol_txrx_get_peer_unmap_conf_support(void)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		qdf_print("%s: pdev is NULL\n", __func__);
+		return false;
+	}
+	return pdev->enable_peer_unmap_conf_support;
+}
+
+void ol_txrx_set_peer_unmap_conf_support(bool val)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		qdf_print("%s: pdev is NULL\n", __func__);
+		return;
+	}
+	pdev->enable_peer_unmap_conf_support = val;
+}

--- a/core/dp/txrx/ol_txrx.h
+++ b/core/dp/txrx/ol_txrx.h

@@ -378,4 +378,23 @@ bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev);
  * return NONE
  */
 void ol_txrx_set_new_htt_msg_format(uint8_t val);
+
+/**
+ * ol_txrx_set_peer_unmap_conf_support() - set peer unmap conf feature
+ * @val - enable or disable peer unmap conf feature
+ *
+ * Set if the peer unmap conf feature is supported by both FW and INI
+ *
+ * return NONE
+ */
+void ol_txrx_set_peer_unmap_conf_support(bool val);
+
+/**
+ * ol_txrx_get_peer_unmap_conf_support() - check peer unmap conf feature
+ *
+ * Check if the peer unmap conf feature is enabled
+ *
+ * return true if the peer unmap conf feature is enabled, else false
+ */
+bool ol_txrx_get_peer_unmap_conf_support(void);
+
 #endif /* _OL_TXRX__H_ */

--- a/core/dp/txrx/ol_txrx_peer_find.c
+++ b/core/dp/txrx/ol_txrx_peer_find.c

@@ -375,11 +375,16 @@ static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
 	int i;
 	uint32_t peer_id_ref_cnt;
 	uint32_t peer_ref_cnt;
+	u8 check_valid = 0;
+
+	if (pdev->enable_peer_unmap_conf_support)
+		check_valid = 1;
 
 	/* check if there's already a peer object with this MAC address */
 	peer =
 		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac_addr,
-						    1 /* is aligned */, 0,
+						    1 /* is aligned */,
+						    check_valid,
 						    PEER_DEBUG_ID_OL_PEER_MAP);
 
 	if (!peer || peer_id == HTT_INVALID_PEER) {
@@ -613,13 +618,24 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 	if (qdf_atomic_dec_and_test
 		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
+		bool peer_id_matched = false;
+
 		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
 			if (peer->peer_ids[i] == peer_id) {
 				peer->peer_ids[i] = HTT_INVALID_PEER;
+				peer_id_matched = true;
 				break;
 			}
 		}
+		if (pdev->enable_peer_unmap_conf_support && peer_id_matched) {
+			for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+				if (peer->map_unmap_peer_ids[i] ==
+				    HTT_INVALID_PEER) {
+					peer->map_unmap_peer_ids[i] = peer_id;
+					break;
+				}
+			}
+		}
 	}
 
 	ref_cnt = qdf_atomic_read

--- a/core/dp/txrx/ol_txrx_types.h
+++ b/core/dp/txrx/ol_txrx_types.h

@@ -1048,6 +1048,7 @@ struct ol_txrx_pdev_t {
 	struct ol_txrx_ipa_resources ipa_resource;
 #endif /* IPA_UC_OFFLOAD */
 	bool new_htt_msg_format;
+	bool enable_peer_unmap_conf_support;
 };
 
 struct ol_txrx_vdev_t {
@@ -1277,10 +1278,13 @@ struct ol_txrx_peer_t {
 	/* Wrapper around the cached_bufq list */
 	struct ol_txrx_cached_bufq_t bufq_info;
 
+	ol_txrx_peer_unmap_sync_cb peer_unmap_sync_cb;
+
 	ol_tx_filter_func tx_filter;
 
 	/* peer ID(s) for this peer */
 	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
+	uint16_t map_unmap_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
 #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
 	uint16_t local_id;
 #endif
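
The new get/set cfg ops let the platform layer latch
enable_peer_unmap_conf_support once at init; per the header comment, the
intent is to enable it only when both the INI option and FW support agree.
That wiring is outside this diff; below is a minimal sketch, assuming a cdp
dispatch wrapper named cdp_cfg_set_peer_unmap_conf_support() and placeholder
names for the INI and FW-capability inputs.

    /* Hypothetical init-time wiring (not part of this diff) */
    static void hdd_update_peer_unmap_conf_support(struct cdp_soc_t *soc,
                                                   bool ini_enabled,
                                                   bool fw_unmap_conf_support)
    {
        /* Enable only when the INI knob and the FW capability both agree;
         * this lands in ol_txrx_set_peer_unmap_conf_support() via the
         * .set_peer_unmap_conf_support op in ol_ops_cfg.
         */
        cdp_cfg_set_peer_unmap_conf_support(soc,
                                            ini_enabled &&
                                            fw_unmap_conf_support);
    }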