@@ -50,7 +50,6 @@ ol_tx_sched_log(struct ol_txrx_pdev_t *pdev);
 static void
 ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
 {
-	return;
 }
 #endif /* defined(DEBUG_HL_LOGGING) */
 
@@ -329,6 +328,7 @@ ol_tx_sched_discard_select_category_rr(struct ol_txrx_pdev_t *pdev)
	 */
	for (i = 0; i < (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES); i++) {
		int score;
+
		score =
			scheduler->tx_active_queues_in_tid_array[i].frms *
			scheduler->discard_weights[i];
@@ -409,6 +409,7 @@ ol_tx_sched_init_rr(
		scheduler->tx_active_queues_in_tid_array[i].tid = i;
		if (i < OL_TX_NON_QOS_TID) {
			int ac = TXRX_TID_TO_WMM_AC(i);
+
			switch (ac) {
			case TXRX_WMM_AC_VO:
				scheduler->discard_weights[i] =
@@ -427,6 +428,7 @@ ol_tx_sched_init_rr(
	}
	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
		int j = i + OL_TX_NUM_TIDS;
+
		scheduler->tx_active_queues_in_tid_array[j].tid =
			OL_TX_NUM_TIDS - 1;
		scheduler->discard_weights[j] =
@@ -513,17 +515,18 @@ struct ol_tx_sched_wrr_adv_category_info_t {
		send_limit, \
		credit_reserve, \
		discard_weights) \
-enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
-	(wrr_skip_weight) }; \
-enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
-	(credit_threshold) }; \
-enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
-	(send_limit) }; \
-enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
-	(credit_reserve) }; \
-enum { OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
-	(discard_weights) }
-
+enum { \
+	OL_TX_SCHED_WRR_ADV_ ## cat ## _WRR_SKIP_WEIGHT = \
+		(wrr_skip_weight), \
+	OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_THRESHOLD = \
+		(credit_threshold), \
+	OL_TX_SCHED_WRR_ADV_ ## cat ## _SEND_LIMIT = \
+		(send_limit), \
+	OL_TX_SCHED_WRR_ADV_ ## cat ## _CREDIT_RESERVE = \
+		(credit_reserve), \
+	OL_TX_SCHED_WRR_ADV_ ## cat ## _DISCARD_WEIGHT = \
+		(discard_weights) \
+}
 /* Rome:
  * For high-volume traffic flows (VI, BE, BK), use a credit threshold
  * roughly equal to a large A-MPDU (occupying half the target memory
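
Note on the CFG_SPEC macro above: it is invoked at file scope (see the
MCAST_MGMT invocation in the next hunk's context), so its body has to remain
a declaration. A do { } while (0) wrapper is a statement and does not compile
outside a function, which is why the five per-field enums are folded into one
enum declaration instead; checkpatch sees a single statement and the
file-scope uses still build. A minimal standalone sketch of how the
token-pasted form expands (names invented for illustration; only the
MCAST_MGMT values come from this file):

#include <stdio.h>

/* Same shape as the driver macro: one enum declaration per category,
 * with ## token pasting building the constant names. */
#define CAT_CFG_SPEC(cat, skip, thresh, limit, reserve, discard) \
enum { \
	CAT_ ## cat ## _WRR_SKIP_WEIGHT = (skip), \
	CAT_ ## cat ## _CREDIT_THRESHOLD = (thresh), \
	CAT_ ## cat ## _SEND_LIMIT = (limit), \
	CAT_ ## cat ## _CREDIT_RESERVE = (reserve), \
	CAT_ ## cat ## _DISCARD_WEIGHT = (discard) \
}

/* File-scope invocation, mirroring
 * OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1); */
CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1);

int main(void)
{
	/* The macro expanded to named compile-time constants. */
	printf("skip=%d threshold=%d limit=%d\n",
	       CAT_MCAST_MGMT_WRR_SKIP_WEIGHT,
	       CAT_MCAST_MGMT_CREDIT_THRESHOLD,
	       CAT_MCAST_MGMT_SEND_LIMIT);
	return 0;
}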
@@ -570,17 +573,11 @@ OL_TX_SCHED_WRR_ADV_CAT_CFG_SPEC(MCAST_MGMT, 1, 1, 4, 0, 1);
		.stat.cat_name = #category; \
	} while (0)
 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_QUEUED(category, frms) \
-	do { \
-		category->stat.queued += frms; \
-	} while (0)
+	category->stat.queued += frms
 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISCARD(category, frms) \
-	do { \
-		category->stat.discard += frms; \
-	} while (0)
+	category->stat.discard += frms
 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_INC_DISPATCHED(category, frms) \
-	do { \
-		category->stat.dispatched += frms; \
-	} while (0)
+	category->stat.dispatched += frms
 #define OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(scheduler) \
	ol_tx_sched_wrr_adv_cat_stat_dump(scheduler)
 #define OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(scheduler) \
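
The three stat macros above expand to a single expression each, so the
do { } while (0) wrapper buys nothing, and a trailing semicolon inside the
body would break unbraced if/else call sites; the caller's semicolon
completes the statement. A self-contained sketch contrasting the two macro
shapes (names are illustrative, not the driver's):

#include <stdio.h>

struct cat_stat { int queued; int discard; };

/* Single expression: no wrapper, no trailing semicolon.
 * The invocation site's ';' completes the statement. */
#define STAT_INC_QUEUED(s, frms) ((s)->queued += (frms))

/* Multiple statements: do { } while (0) makes the macro behave
 * as one statement, so an if/else around it still parses. */
#define STAT_RESET(s) \
	do { \
		(s)->queued = 0; \
		(s)->discard = 0; \
	} while (0)

int main(void)
{
	struct cat_stat st = { 0, 0 };

	STAT_INC_QUEUED(&st, 5);
	if (st.queued > 3)
		STAT_RESET(&st);	/* safe even without braces */
	else
		STAT_INC_QUEUED(&st, 1);
	printf("queued=%d discard=%d\n", st.queued, st.discard);
	return 0;
}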
@@ -643,6 +640,7 @@ static void ol_tx_sched_wrr_adv_cat_stat_dump(
	struct ol_tx_sched_wrr_adv_t *scheduler)
 {
	int i;
+
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "Scheduler Stats:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
@@ -666,6 +664,7 @@ static void ol_tx_sched_wrr_adv_cat_cur_state_dump(
	struct ol_tx_sched_wrr_adv_t *scheduler)
 {
	int i;
+
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "Scheduler State Snapshot:");
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
@@ -688,6 +687,7 @@ static void ol_tx_sched_wrr_adv_cat_stat_clear(
	struct ol_tx_sched_wrr_adv_t *scheduler)
 {
	int i;
+
	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; ++i) {
		scheduler->categories[i].stat.queued = 0;
		scheduler->categories[i].stat.discard = 0;
@@ -785,8 +785,10 @@ ol_tx_sched_select_batch_wrr_adv(
			/* skip this category (move it to the back) */
			ol_tx_sched_wrr_adv_rotate_order_list_tail(scheduler,
								   index);
-			/* try again (iterate) on the new element
-			 *that was moved up */
+			/*
+			 * try again (iterate) on the new element
+			 * that was moved up
+			 */
			continue;
		}
		/* found the first active category whose WRR turn is present */
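
For the comment being reflowed here: when the category at the current
position has no WRR turn, the scheduler rotates it to the tail of its order
list and retries without advancing the index, so the element that slid into
the current slot is examined next. A hedged sketch of that rotate-and-retry
pattern over a plain array (a simplified stand-in for the driver's order
list):

#include <stdio.h>

#define NUM_CATS 4

/* Rotate the entry at 'index' to the tail, shifting the rest up. */
static void rotate_to_tail(int order[], int num, int index)
{
	int moved = order[index];
	int i;

	for (i = index; i < num - 1; i++)
		order[i] = order[i + 1];
	order[num - 1] = moved;
}

int main(void)
{
	int order[NUM_CATS] = { 2, 0, 3, 1 };
	int index = 0;

	/* Suppose category 2's WRR turn is not present: skip it.
	 * Note that index stays put; order[0] now names a new category. */
	rotate_to_tail(order, NUM_CATS, index);
	printf("%d %d %d %d\n",
	       order[0], order[1], order[2], order[3]);	/* 0 3 1 2 */
	return 0;
}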
@@ -878,12 +880,11 @@ ol_tx_sched_select_batch_wrr_adv(
				TAILQ_INSERT_HEAD(&category->state.head, txq,
						  list_elem);
				return 0;
-			} else {
-				TAILQ_INSERT_TAIL(&category->state.head, txq,
-						  list_elem);
-				if (!pdev->tx_sched.last_used_txq)
-					pdev->tx_sched.last_used_txq = txq;
			}
+			TAILQ_INSERT_TAIL(&category->state.head, txq,
+					  list_elem);
+			if (!pdev->tx_sched.last_used_txq)
+				pdev->tx_sched.last_used_txq = txq;
		}
		TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
	} else {
@@ -961,6 +962,7 @@ ol_tx_sched_discard_select_category_wrr_adv(struct ol_txrx_pdev_t *pdev)
	 */
	for (i = 0; i < OL_TX_SCHED_WRR_ADV_NUM_CATEGORIES; i++) {
		int score;
+
		score =
			scheduler->categories[i].state.frms *
			scheduler->categories[i].specs.discard_weight;
@@ -1103,8 +1105,10 @@ ol_tx_sched_init_wrr_adv(
		scheduler->categories[i].state.frms = 0;
		/*scheduler->categories[i].state.bytes = 0;*/
		TAILQ_INIT(&scheduler->categories[i].state.head);
-		/* init categories to not be skipped before
-		 *their initial selection */
+		/*
+		 * init categories to not be skipped before
+		 * their initial selection
+		 */
		scheduler->categories[i].state.wrr_count =
			scheduler->categories[i].specs.wrr_skip_weight - 1;
	}
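
The reflowed comment above documents a subtle initialization: wrr_count
starts at wrr_skip_weight - 1, so a fresh category is already due on its
first pass rather than waiting out a full skip cycle. A small sketch of how
such a countdown could gate selection (illustrative logic, not the driver's
exact bookkeeping):

#include <stdio.h>

/* Returns 1 when the category's WRR turn is present; otherwise
 * counts down toward its next turn. */
static int wrr_turn_present(int *wrr_count, int wrr_skip_weight)
{
	if (++*wrr_count >= wrr_skip_weight) {
		*wrr_count = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	int skip_weight = 3;
	/* Init to skip_weight - 1: the first call immediately succeeds. */
	int wrr_count = skip_weight - 1;
	int round;

	for (round = 1; round <= 6; round++)
		printf("round %d: turn=%d\n", round,
		       wrr_turn_present(&wrr_count, skip_weight));
	return 0;
}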
@@ -1195,7 +1199,7 @@ ol_txrx_set_wmm_param(struct cdp_pdev *pdev,
  */
 void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
 {
-	OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_STAT_DUMP(pdev->tx_sched.scheduler);
 }
 
 /**
@@ -1206,7 +1210,7 @@ void ol_tx_sched_stats_display(struct ol_txrx_pdev_t *pdev)
  */
 void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
 {
-	OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_CUR_STATE_DUMP(pdev->tx_sched.scheduler);
 }
 
 /**
@@ -1217,7 +1221,7 @@ void ol_tx_sched_cur_state_display(struct ol_txrx_pdev_t *pdev)
  */
 void ol_tx_sched_stats_clear(struct ol_txrx_pdev_t *pdev)
 {
-	OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
+	OL_TX_SCHED_WRR_ADV_CAT_STAT_CLEAR(pdev->tx_sched.scheduler);
 }
 
 #endif /* OL_TX_SCHED == OL_TX_SCHED_WRR_ADV */
@@ -1256,8 +1260,10 @@ ol_tx_sched_discard_select(
	u_int32_t credit;
	struct ol_tx_sched_notify_ctx_t notify_ctx;
 
-	/* first decide what category of traffic (e.g. TID or AC)
-	 *to discard next */
+	/*
+	 * first decide what category of traffic (e.g. TID or AC)
+	 * to discard next
+	 */
	cat = ol_tx_sched_discard_select_category(pdev);
 
	/* then decide which peer within this category to discard from next */
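
The two comments above describe discard selection as two nested choices:
first the category with the largest weighted backlog (frames queued times
discard weight, as computed in the earlier score hunks), then a victim queue
within that category. A hedged sketch of the two-stage pick (the data layout
and the stage-2 fullest-queue rule are invented for illustration):

#include <stdio.h>

#define NUM_CATS 3
#define NUM_PEERS 2

/* frames queued per (category, peer) */
static int frms[NUM_CATS][NUM_PEERS] = { {4, 1}, {9, 2}, {0, 3} };
static int discard_weight[NUM_CATS] = { 1, 4, 2 };

int main(void)
{
	int cat = 0, peer = 0, best = -1, i;

	/* Stage 1: category with the highest weighted backlog. */
	for (i = 0; i < NUM_CATS; i++) {
		int score = (frms[i][0] + frms[i][1]) * discard_weight[i];

		if (score > best) {
			best = score;
			cat = i;
		}
	}
	/* Stage 2: within that category, the fullest peer queue. */
	for (i = 1; i < NUM_PEERS; i++)
		if (frms[cat][i] > frms[cat][peer])
			peer = i;
	printf("discard from category %d, peer %d\n", cat, peer);
	return 0;
}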
@@ -1371,10 +1377,10 @@ ol_tx_sched_dispatch(
 {
	qdf_nbuf_t msdu, prev = NULL, head_msdu = NULL;
	struct ol_tx_desc_t *tx_desc;
-
	u_int16_t *msdu_id_storage;
	u_int16_t msdu_id;
	int num_msdus = 0;
+
	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	while (sctx->frms) {
		tx_desc = TAILQ_FIRST(&sctx->head);
@@ -1469,8 +1475,10 @@ ol_tx_sched(struct ol_txrx_pdev_t *pdev)
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_running;
 
	ol_tx_sched_log(pdev);
-	/*adf_os_print("BEFORE tx sched:\n");*/
-	/*ol_tx_queues_display(pdev);*/
+	/*
+	 *adf_os_print("BEFORE tx sched:\n");
+	 *ol_tx_queues_display(pdev);
+	 */
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
 
	TAILQ_INIT(&sctx.head);
@@ -1479,6 +1487,7 @@ ol_tx_sched(struct ol_txrx_pdev_t *pdev)
	ol_tx_sched_select_init(pdev);
	while (qdf_atomic_read(&pdev->target_tx_credit) > 0) {
		int num_credits;
+
		qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
		credit = qdf_atomic_read(&pdev->target_tx_credit);
		num_credits = ol_tx_sched_select_batch(pdev, &sctx, credit);
@@ -1501,8 +1510,10 @@ ol_tx_sched(struct ol_txrx_pdev_t *pdev)
	ol_tx_sched_dispatch(pdev, &sctx);
 
	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
-	/*adf_os_print("AFTER tx sched:\n");*/
-	/*ol_tx_queues_display(pdev);*/
+	/*
+	 *adf_os_print("AFTER tx sched:\n");
+	 *ol_tx_queues_display(pdev);
+	 */
 
	pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
	qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
@@ -1575,6 +1586,7 @@ ol_tx_sched_log(struct ol_txrx_pdev_t *pdev)
	     i < OL_TX_SCHED_NUM_CATEGORIES && j < num_cats_active;
	     i++) {
		u_int8_t *p;
+
		ol_tx_sched_category_info(pdev, i, &active, &frms, &bytes);
		if (!active)
			continue;