@@ -219,6 +219,7 @@ is_ol_tx_discard_frames_success(struct ol_txrx_pdev_t *pdev,
 				struct ol_tx_desc_t *tx_desc)
 {
 	ol_txrx_vdev_handle vdev;
+
 	vdev = tx_desc->vdev;
 	return qdf_atomic_read(&vdev->tx_desc_count) >
 		((ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) >> 1)
@@ -254,6 +255,7 @@ ol_tx_enqueue(
 	 */
 	if (is_ol_tx_discard_frames_success(pdev, tx_desc)) {
 		ol_tx_desc_list tx_descs;
+
 		TAILQ_INIT(&tx_descs);
 		ol_tx_queue_discard(pdev, false, &tx_descs);
 		/*Discard Frames in Discard List*/
@@ -296,7 +298,7 @@ ol_tx_dequeue(
 {
 	u_int16_t num_frames;
 	int bytes_sum;
-	unsigned credit_sum;
+	unsigned int credit_sum;
 
 	TXRX_ASSERT2(txq->flag != ol_tx_queue_paused);
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -307,8 +309,9 @@ ol_tx_dequeue(
 	bytes_sum = 0;
 	credit_sum = 0;
 	for (num_frames = 0; num_frames < max_frames; num_frames++) {
-		unsigned frame_credit;
+		unsigned int frame_credit;
 		struct ol_tx_desc_t *tx_desc;
+
 		tx_desc = TAILQ_FIRST(&txq->head);
 
 		frame_credit = htt_tx_msdu_credit(tx_desc->netbuf);
@@ -425,6 +428,7 @@ ol_txrx_peer_pause_but_no_mgmt_q_base(
 	struct ol_txrx_peer_t *peer)
 {
 	int i;
+
 	for (i = 0; i < OL_TX_MGMT_TID; i++)
 		ol_txrx_peer_tid_pause_base(pdev, peer, i);
 }
@@ -444,6 +448,7 @@ ol_txrx_peer_pause_base(
 	struct ol_txrx_peer_t *peer)
 {
 	int i;
+
 	for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
 		ol_txrx_peer_tid_pause_base(pdev, peer, i);
 }
@@ -513,6 +518,7 @@ ol_txrx_peer_unpause_but_no_mgmt_q_base(
 	struct ol_txrx_peer_t *peer)
 {
 	int i;
+
 	for (i = 0; i < OL_TX_MGMT_TID; i++)
 		ol_txrx_peer_tid_unpause_base(pdev, peer, i);
 }
@@ -531,6 +537,7 @@ ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
 
 	if (tid == -1) {
 		int i;
+
 		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++)
 			ol_txrx_peer_tid_unpause_base(pdev, peer, i);
 
@@ -605,6 +612,7 @@ void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_txrx_peer_t *peer;
+
 	/* TO DO: log the queue unpause */
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -868,14 +876,17 @@ ol_tx_pdev_peer_bal_timer(void *context)
 					pdev->tx_peer_bal.limit_list[i].limit;
 
 				struct ol_txrx_peer_t *peer = NULL;
+
 				peer = ol_txrx_peer_find_by_id(pdev, peer_id);
 				TX_SCHED_DEBUG_PRINT(
 					"%s peer_id %d peer = 0x%x tx limit %d\n",
 					__func__, peer_id,
 					(int)peer, tx_limit);
 
-				/* It is possible the peer limit is still not 0,
-				   but it is the scenario should not be cared */
+				/*
+				 * It is possible the peer limit is still not 0,
+				 * but it is the scenario should not be cared
+				 */
 				if (peer) {
 					peer->tx_limit = tx_limit;
 				} else {
@@ -1001,6 +1012,7 @@ ol_txrx_peer_link_status_handler(
 		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
 		if (peer) {
 			u_int32_t thresh, limit, phy;
+
 			phy = peer_link_status->phy;
 			thresh = pdev->tx_peer_bal.ctl_thresh[phy].tput_thresh;
 			limit = pdev->tx_peer_bal.ctl_thresh[phy].tx_limit;
@@ -1165,9 +1177,9 @@ ol_tx_queue_log_oldest_update(struct ol_txrx_pdev_t *pdev, int offset)
 			(align - ((oldest_record_offset + 1/*type*/)))
 			& (align - 1);
 		/*
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-		"TXQ LOG old alloc: offset %d, type %d, size %d (%d)\n",
-		oldest_record_offset, type, size, size + 1 + align_pad);
+		 * QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+		 * "TXQ LOG old alloc: offset %d, type %d, size %d (%d)\n",
+		 * oldest_record_offset, type, size, size + 1 + align_pad);
 		*/
 		oldest_record_offset += size + 1 + align_pad;
 	}
@@ -1224,9 +1236,9 @@ alloc_found:
 			ol_tx_log_entry_type_wrap;
 
 	/*
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
-	"TXQ LOG new alloc: offset %d, type %d, size %d (%d)\n",
-	offset, type, size, size + 1 + align_pad);
+	 * QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+	 * "TXQ LOG new alloc: offset %d, type %d, size %d (%d)\n",
+	 * offset, type, size, size + 1 + align_pad);
 	*/
 	pdev->txq_log.data[offset] = type;
 	pdev->txq_log.offset = offset + size + 1 + align_pad;
@@ -1261,6 +1273,7 @@ ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
 	case ol_tx_log_entry_type_enqueue:
 	{
 		struct ol_tx_log_queue_add_t record;
+
 		qdf_mem_copy(&record,
 			     &pdev->txq_log.data[offset + 1 + align_pad],
 			     sizeof(struct ol_tx_log_queue_add_t));
@@ -1293,13 +1306,14 @@ ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
 				QDF_TRACE_LEVEL_INFO,
 				"Q: %6d %5d %3d from vdev",
 				record.num_frms, record.num_bytes,
-				 record.tid);
+				record.tid);
 		}
 		break;
 	}
 	case ol_tx_log_entry_type_dequeue:
 	{
 		struct ol_tx_log_queue_add_t record;
+
 		qdf_mem_copy(&record,
 			     &pdev->txq_log.data[offset + 1 + align_pad],
 			     sizeof(struct ol_tx_log_queue_add_t));
@@ -1338,6 +1352,7 @@ ol_tx_queue_log_record_display(struct ol_txrx_pdev_t *pdev, int offset)
 	case ol_tx_log_entry_type_queue_free:
 	{
 		struct ol_tx_log_queue_add_t record;
+
 		qdf_mem_copy(&record,
 			     &pdev->txq_log.data[offset + 1 + align_pad],
 			     sizeof(struct ol_tx_log_queue_add_t));
@@ -1457,6 +1472,7 @@ ol_tx_queue_log_display(struct ol_txrx_pdev_t *pdev)
 
 	while (unwrap || offset != pdev->txq_log.offset) {
 		int delta = ol_tx_queue_log_record_display(pdev, offset);
+
 		if (delta == 0)
 			return; /* error */
 
@@ -1476,6 +1492,7 @@ ol_tx_queue_log_enqueue(
 	int tid;
 	u_int16_t peer_id = msdu_info->htt.info.peer_id;
 	struct ol_tx_log_queue_add_t *log_elem;
+
 	tid = msdu_info->htt.info.ext_tid;
 
 	qdf_spin_lock_bh(&pdev->txq_log_spinlock);
@@ -1647,6 +1664,7 @@ ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
 		struct ol_txrx_peer_t *peer;
 		int i;
+
 		for (i = 0; i < QDF_ARRAY_SIZE(vdev->txqs); i++) {
 			if (vdev->txqs[i].frms == 0)
 				continue;
@@ -1782,7 +1800,6 @@ void ol_txrx_vdev_flush(struct cdp_vdev *pvdev)
 #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
 void ol_txrx_vdev_flush(struct cdp_vdev *data_vdev)
 {
-	return;
 }
 #endif
 
@@ -1925,7 +1942,6 @@ static inline
 void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
 {
 	ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
-	return;
 }
 /**
  * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
@@ -1937,7 +1953,6 @@ static inline
 void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
 {
 	ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
-	return;
 }
 #else
 /**
@@ -1949,7 +1964,6 @@ void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
 static inline
 void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
 {
-	return;
 }
 
 /**
@@ -1962,7 +1976,6 @@ static inline
 void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
 {
 	ol_tx_pdev_ll_pause_queue_send_all(pdev);
-	return;
 }
 #endif
 
@@ -2018,6 +2031,7 @@ static void ol_tx_pdev_throttle_phase_timer(void *context)
 static void ol_tx_pdev_throttle_tx_timer(void *context)
 {
 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
+
 	ol_tx_pdev_ll_pause_queue_send_all(pdev);
 }
 #endif
@@ -2172,6 +2186,7 @@ ol_tx_vdev_has_tx_queue_group(
 	u_int8_t vdev_id)
 {
 	u_int16_t vdev_bitmap;
+
 	vdev_bitmap = OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
 	if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_bitmap, vdev_id))
 		return true;
@@ -2192,6 +2207,7 @@ ol_tx_ac_has_tx_queue_group(
 	u_int8_t ac)
 {
 	u_int16_t ac_bitmap;
+
 	ac_bitmap = OL_TXQ_GROUP_AC_MASK_GET(group->membership);
 	if (OL_TXQ_GROUP_AC_BIT_MASK_GET(ac_bitmap, ac))
 		return true;
@@ -2206,6 +2222,7 @@ u_int32_t ol_tx_txq_group_credit_limit(
 {
 	u_int8_t i;
 	int updated_credit = credit;
+
 	/*
 	 * If this tx queue belongs to a group, check whether the group's
 	 * credit limit is more stringent than the global credit limit.
@@ -2255,6 +2272,7 @@ ol_tx_set_vdev_group_ptr(
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
 		if (vdev->vdev_id == vdev_id) {
 			u_int8_t i, j;
+
 			/* update vdev queues group pointers */
 			for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
 				for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++)
@@ -2282,6 +2300,7 @@ void ol_tx_txq_set_group_ptr(
 	struct ol_tx_queue_group_t *grp_ptr)
 {
 	u_int8_t i;
+
 	for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++)
 		txq->group_ptrs[i] = grp_ptr;
 }