qcacmn: Add missing lock protection between DP Tx and Tx completions

Change-Id: I68dd5371688235c173a5bc6576601389146e0ecb
CRs-Fixed: 2004658
This commit is contained in:
Vijay Pamidipati
2017-02-09 22:49:00 +05:30
committed by qcabuildsw
parent 25aa822c28
commit 4d5d436e8a
4 changed files with 16 additions and 43 deletions

View File

@@ -114,11 +114,10 @@ dp_tx_desc_release(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id); dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
vdev->num_tx_outstanding--; qdf_atomic_dec(&pdev->num_tx_outstanding);
pdev->num_tx_outstanding--;
if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
pdev->num_tx_exception--; qdf_atomic_dec(&pdev->num_tx_exception);
if (HAL_TX_COMP_RELEASE_SOURCE_TQM == if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
hal_tx_comp_get_buffer_source(&tx_desc->comp)) hal_tx_comp_get_buffer_source(&tx_desc->comp))
@@ -311,8 +310,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
} }
/* Flow control/Congestion Control counters */ /* Flow control/Congestion Control counters */
vdev->num_tx_outstanding++; qdf_atomic_inc(&pdev->num_tx_outstanding);
pdev->num_tx_outstanding++;
/* Initialize the SW tx descriptor */ /* Initialize the SW tx descriptor */
tx_desc->nbuf = nbuf; tx_desc->nbuf = nbuf;
@@ -346,7 +344,6 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
align_pad); align_pad);
tx_desc->pkt_offset += htt_hdr_size; tx_desc->pkt_offset += htt_hdr_size;
tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
pdev->num_tx_exception++;
is_exception = 1; is_exception = 1;
} }
@@ -354,7 +351,6 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
eh = (struct ether_header *) qdf_nbuf_data(nbuf); eh = (struct ether_header *) qdf_nbuf_data(nbuf);
if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) { if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
pdev->num_tx_exception++;
is_exception = 1; is_exception = 1;
} }
} }
@@ -365,7 +361,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
{ {
/* Temporary WAR due to TQM VP issues */ /* Temporary WAR due to TQM VP issues */
tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
pdev->num_tx_exception++; qdf_atomic_inc(&pdev->num_tx_exception);
} }
return tx_desc; return tx_desc;
@@ -412,11 +408,8 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
if (!tx_desc) if (!tx_desc)
return NULL; return NULL;
tx_desc->flags |= DP_TX_DESC_FLAG_ALLOCATED;
/* Flow control/Congestion Control counters */ /* Flow control/Congestion Control counters */
vdev->num_tx_outstanding++; qdf_atomic_inc(&pdev->num_tx_outstanding);
pdev->num_tx_outstanding++;
/* Initialize the SW tx descriptor */ /* Initialize the SW tx descriptor */
tx_desc->nbuf = nbuf; tx_desc->nbuf = nbuf;
@@ -438,7 +431,7 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
#if TQM_BYPASS_WAR #if TQM_BYPASS_WAR
/* Temporary WAR due to TQM VP issues */ /* Temporary WAR due to TQM VP issues */
tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW; tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
pdev->num_tx_exception++; qdf_atomic_inc(&pdev->num_tx_exception);
#endif #endif
tx_desc->msdu_ext_desc = msdu_ext_desc; tx_desc->msdu_ext_desc = msdu_ext_desc;
@@ -1071,7 +1064,7 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
switch (tx_status) { switch (tx_status) {
case HTT_TX_FW2WBM_TX_STATUS_OK: case HTT_TX_FW2WBM_TX_STATUS_OK:
{ {
pdev->num_tx_exception--; qdf_atomic_dec(&pdev->num_tx_exception);
DP_TX_FREE_SINGLE_BUF(soc, vdev, DP_TX_FREE_SINGLE_BUF(soc, vdev,
tx_desc->nbuf); tx_desc->nbuf);
break; break;
@@ -1081,7 +1074,7 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{ {
DP_TX_FREE_SINGLE_BUF(soc, vdev, DP_TX_FREE_SINGLE_BUF(soc, vdev,
tx_desc->nbuf); tx_desc->nbuf);
pdev->num_tx_exception--; qdf_atomic_dec(&pdev->num_tx_exception);
DP_STATS_MSDU_INCR(soc, tx.dropped.pkts, tx_desc->nbuf); DP_STATS_MSDU_INCR(soc, tx.dropped.pkts, tx_desc->nbuf);
break; break;
} }
@@ -1372,8 +1365,6 @@ uint32_t dp_tx_comp_handler(struct dp_soc *soc, uint32_t ring_id,
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev) QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{ {
vdev->num_tx_outstanding = 0;
/* /*
* Fill HTT TCL Metadata with Vdev ID and MAC ID * Fill HTT TCL Metadata with Vdev ID and MAC ID
*/ */
@@ -1418,8 +1409,8 @@ QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
struct dp_soc *soc = pdev->soc; struct dp_soc *soc = pdev->soc;
/* Initialize Flow control counters */ /* Initialize Flow control counters */
pdev->num_tx_exception = 0; qdf_atomic_init(&pdev->num_tx_exception);
pdev->num_tx_outstanding = 0; qdf_atomic_init(&pdev->num_tx_outstanding);
if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) { if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
/* Initialize descriptors in TCL Ring */ /* Initialize descriptors in TCL Ring */

View File

@@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016 The Linux Foundation. All rights reserved. * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
* *
* Permission to use, copy, modify, and/or distribute this software for * Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the * any purpose with or without fee is hereby granted, provided that the
@@ -36,26 +36,10 @@
#define DP_TX_DESC_ID_OFFSET_MASK 0x0003FF #define DP_TX_DESC_ID_OFFSET_MASK 0x0003FF
#define DP_TX_DESC_ID_OFFSET_OS 0 #define DP_TX_DESC_ID_OFFSET_OS 0
/**
 * In case the TX descriptor pool and CPU core are combined,
 * the TX context and TX completion context should also run on the same core.
 * In this case, each TX descriptor pool operation will be serialized per core,
 * so TX and TX_COMP will not race and locking for protection is not required.
 * TX_DESC_POOL_PER_CORE : this is most likely for WIN.
 * On MCL, the TX descriptor pool will be tied to the VDEV instance;
 * then locking protection is required.
*/
#ifdef TX_CORE_ALIGNED_SEND
#define TX_DESC_LOCK_CREATE(lock) /* NOOP */
#define TX_DESC_LOCK_DESTROY(lock) /* NOOP */
#define TX_DESC_LOCK_LOCK(lock) /* NOOP */
#define TX_DESC_LOCK_UNLOCK(lock) /* NOOP */
#else
#define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock) #define TX_DESC_LOCK_CREATE(lock) qdf_spinlock_create(lock)
#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock) #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock(lock) #define TX_DESC_LOCK_LOCK(lock) qdf_spin_lock(lock)
#define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock(lock) #define TX_DESC_LOCK_UNLOCK(lock) qdf_spin_unlock(lock)
#endif /* TX_CORE_ALIGNED_SEND */
QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id, QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
uint16_t num_elem); uint16_t num_elem);
@@ -91,7 +75,7 @@ static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
soc->tx_desc[desc_pool_id].num_allocated++; soc->tx_desc[desc_pool_id].num_allocated++;
} }
DP_STATS_ADD(pdev, pub.tx.desc_in_use, 1); DP_STATS_ADD(pdev, pub.tx.desc_in_use, 1);
tx_desc->flags |= DP_TX_DESC_FLAG_ALLOCATED; tx_desc->flags = DP_TX_DESC_FLAG_ALLOCATED;
TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock); TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
return tx_desc; return tx_desc;
@@ -111,7 +95,7 @@ dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
{ {
TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock); TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
tx_desc->flags &= ~DP_TX_DESC_FLAG_ALLOCATED; tx_desc->flags = 0;
tx_desc->next = soc->tx_desc[desc_pool_id].freelist; tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
soc->tx_desc[desc_pool_id].freelist = tx_desc; soc->tx_desc[desc_pool_id].freelist = tx_desc;
DP_STATS_SUB(pdev, pub.tx.desc_in_use, 1); DP_STATS_SUB(pdev, pub.tx.desc_in_use, 1);

View File

@@ -537,9 +537,9 @@ struct dp_pdev {
/* Enhanced Stats is enabled */ /* Enhanced Stats is enabled */
bool ap_stats_tx_cal_enable; bool ap_stats_tx_cal_enable;
uint32_t num_tx_outstanding; qdf_atomic_t num_tx_outstanding;
uint32_t num_tx_exception; qdf_atomic_t num_tx_exception;
/* MCL specific local peer handle */ /* MCL specific local peer handle */
struct { struct {
@@ -655,8 +655,6 @@ struct dp_vdev {
/* Multicast enhancement enabled */ /* Multicast enhancement enabled */
uint8_t mcast_enhancement_en; uint8_t mcast_enhancement_en;
uint32_t num_tx_outstanding;
/* per vdev rx nbuf queue */ /* per vdev rx nbuf queue */
qdf_nbuf_queue_t rxq; qdf_nbuf_queue_t rxq;

View File

@@ -280,7 +280,7 @@ static inline void hal_tx_desc_set_buf_addr(void *desc,
HAL_SET_FLD(desc, TCL_DATA_CMD_1, HAL_SET_FLD(desc, TCL_DATA_CMD_1,
BUFFER_ADDR_INFO_BUF_ADDR_INFO) |= BUFFER_ADDR_INFO_BUF_ADDR_INFO) |=
HAL_TX_SM(BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32, HAL_TX_SM(BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
(((uint64_t) paddr) << 32)); (((uint64_t) paddr) >> 32));
/* Set buffer_addr_info.return_buffer_manager = pool id */ /* Set buffer_addr_info.return_buffer_manager = pool id */
HAL_SET_FLD(desc, TCL_DATA_CMD_1, HAL_SET_FLD(desc, TCL_DATA_CMD_1,