- /*
- * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
- /*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
- #include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
- #include <cdf_atomic.h> /* cdf_atomic_read, etc. */
- #include <ol_cfg.h> /* ol_cfg_addba_retry */
- #include <htt.h> /* HTT_TX_EXT_TID_MGMT */
- #include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
- #include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
- #include <ol_txrx_ctrl_api.h> /* ol_txrx_sync, ol_tx_addba_conf */
- #include <ol_ctrl_txrx_api.h> /* ol_ctrl_addba_req */
- #include <ol_txrx_internal.h> /* TXRX_ASSERT1, etc. */
- #include <ol_txrx_types.h> /* pdev stats */
- #include <ol_tx_desc.h> /* ol_tx_desc, ol_tx_desc_frame_list_free */
- #include <ol_tx.h> /* ol_tx_vdev_ll_pause_queue_send */
- #include <ol_tx_queue.h>
- #include <ol_txrx_dbg.h> /* ENABLE_TX_QUEUE_LOG */
- #include <cdf_types.h> /* bool */
- #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
/**
 * ol_txrx_vdev_pause() - pause this vdev's legacy ll_pause tx queue
 * @vdev: vdev whose tx queue is to be paused
 * @reason: bitmask of pause reasons to record (OR-ed into paused_reason)
 *
 * Marks the queue paused under ll_pause.mutex and bumps the pause-count
 * stat.  Multiple reasons accumulate; the queue stays paused until every
 * reason bit has been cleared by ol_txrx_vdev_unpause().
 *
 * Return: none
 */
void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
{
	/* TO DO: log the queue pause */
	/* acquire the mutex lock, since we'll be modifying the queues */
	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
	vdev->ll_pause.paused_reason |= reason;	/* accumulate reason bits */
	vdev->ll_pause.q_pause_cnt++;		/* stats: pause requests seen */
	vdev->ll_pause.is_q_paused = true;
	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
	/* DP trace emitted outside the lock */
	DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_PAUSE,
			     NULL, 0));
	TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
}
- void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
- {
- /* TO DO: log the queue unpause */
- /* acquire the mutex lock, since we'll be modifying the queues */
- TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
- cdf_spin_lock_bh(&vdev->ll_pause.mutex);
- if (vdev->ll_pause.paused_reason & reason) {
- vdev->ll_pause.paused_reason &= ~reason;
- if (!vdev->ll_pause.paused_reason) {
- vdev->ll_pause.is_q_paused = false;
- vdev->ll_pause.q_unpause_cnt++;
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- ol_tx_vdev_ll_pause_queue_send(vdev);
- } else {
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- }
- } else {
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- }
- DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_UNPAUSE,
- NULL, 0));
- TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
- }
- void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
- {
- cdf_spin_lock_bh(&vdev->ll_pause.mutex);
- cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
- vdev->ll_pause.is_q_timer_on = false;
- while (vdev->ll_pause.txq.head) {
- cdf_nbuf_t next =
- cdf_nbuf_next(vdev->ll_pause.txq.head);
- cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
- cdf_nbuf_unmap(vdev->pdev->osdev,
- vdev->ll_pause.txq.head,
- CDF_DMA_TO_DEVICE);
- cdf_nbuf_tx_free(vdev->ll_pause.txq.head,
- NBUF_PKT_ERROR);
- vdev->ll_pause.txq.head = next;
- }
- vdev->ll_pause.txq.tail = NULL;
- vdev->ll_pause.txq.depth = 0;
- cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
- }
- #endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
- #ifdef QCA_LL_TX_FLOW_CONTROL_V2
- /**
- * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
- * @reason: reason
- *
- * Return: netif_reason_type
- */
- enum netif_reason_type
- ol_txrx_map_to_netif_reason_type(uint32_t reason)
- {
- switch (reason) {
- case OL_TXQ_PAUSE_REASON_FW:
- return WLAN_FW_PAUSE;
- case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
- return WLAN_PEER_UNAUTHORISED;
- case OL_TXQ_PAUSE_REASON_TX_ABORT:
- return WLAN_TX_ABORT;
- case OL_TXQ_PAUSE_REASON_VDEV_STOP:
- return WLAN_VDEV_STOP;
- case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
- return WLAN_THERMAL_MITIGATION;
- default:
- TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "%s: reason not supported %d\n",
- __func__, reason);
- return WLAN_REASON_TYPE_MAX;
- }
- }
- /**
- * ol_txrx_vdev_pause() - pause vdev network queues
- * @vdev: vdev handle
- * @reason: reason
- *
- * Return: none
- */
- void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
- {
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- enum netif_reason_type netif_reason;
- if (cdf_unlikely((!pdev) || (!pdev->pause_cb))) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "%s: invalid pdev\n", __func__);
- return;
- }
- netif_reason = ol_txrx_map_to_netif_reason_type(reason);
- if (netif_reason == WLAN_REASON_TYPE_MAX)
- return;
- pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, netif_reason);
- }
- /**
- * ol_txrx_vdev_unpause() - unpause vdev network queues
- * @vdev: vdev handle
- * @reason: reason
- *
- * Return: none
- */
- void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
- {
- struct ol_txrx_pdev_t *pdev = vdev->pdev;
- enum netif_reason_type netif_reason;
- if (cdf_unlikely((!pdev) || (!pdev->pause_cb))) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
- "%s: invalid pdev\n", __func__);
- return;
- }
- netif_reason = ol_txrx_map_to_netif_reason_type(reason);
- if (netif_reason == WLAN_REASON_TYPE_MAX)
- return;
- pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
- netif_reason);
- }
- /**
- * ol_txrx_pdev_pause() - pause network queues for each vdev
- * @pdev: pdev handle
- * @reason: reason
- *
- * Return: none
- */
- void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
- {
- struct ol_txrx_vdev_t *vdev = NULL, *tmp;
- TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
- ol_txrx_vdev_pause(vdev, reason);
- }
- }
- /**
- * ol_txrx_pdev_unpause() - unpause network queues for each vdev
- * @pdev: pdev handle
- * @reason: reason
- *
- * Return: none
- */
- void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
- {
- struct ol_txrx_vdev_t *vdev = NULL, *tmp;
- TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
- ol_txrx_vdev_unpause(vdev, reason);
- }
- }
- #endif
- /*--- LL tx throttle queue code --------------------------------------------*/
- #if defined(QCA_SUPPORT_TX_THROTTLE)
/**
 * ol_tx_pdev_is_target_empty() - report whether the target tx queues are empty
 *
 * Placeholder ("TM TODO"): the real target-side check has not been
 * implemented, so the target is unconditionally reported as empty.
 *
 * Return: 1 always
 */
uint8_t ol_tx_pdev_is_target_empty(void)
{
	return 1;
}
- #ifdef QCA_LL_TX_FLOW_CONTROL_V2
- /**
- * ol_txrx_thermal_pause() - pause due to thermal mitigation
- * @pdev: pdev handle
- *
- * Return: none
- */
- static inline
- void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
- {
- ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
- return;
- }
- /**
- * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
- * @pdev: pdev handle
- *
- * Return: none
- */
- static inline
- void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
- {
- ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
- return;
- }
- #else
/**
 * ol_txrx_thermal_pause() - pause due to thermal mitigation
 * @pdev: pdev handle
 *
 * No-op in this configuration (flow control v2 not compiled in); the
 * throttle phase timer alone gates transmission.
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
{
}
/**
 * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
 * @pdev: pdev handle
 *
 * Kicks ol_tx_pdev_ll_pause_queue_send_all() so frames queued while the
 * throttle was in the OFF phase can go out.
 *
 * Return: none
 */
static inline
void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
{
	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
- #endif
- void ol_tx_pdev_throttle_phase_timer(void *context)
- {
- struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
- int ms;
- enum throttle_level cur_level;
- enum throttle_phase cur_phase;
- /* update the phase */
- pdev->tx_throttle.current_throttle_phase++;
- if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
- pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
- if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
- /* Traffic is stopped */
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
- "throttle phase --> OFF\n");
- ol_txrx_thermal_pause(pdev);
- cur_level = pdev->tx_throttle.current_throttle_level;
- cur_phase = pdev->tx_throttle.current_throttle_phase;
- ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
- if (pdev->tx_throttle.current_throttle_level !=
- THROTTLE_LEVEL_0) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
- "start timer %d ms\n", ms);
- cdf_softirq_timer_start(&pdev->tx_throttle.
- phase_timer, ms);
- }
- } else {
- /* Traffic can go */
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
- "throttle phase --> ON\n");
- ol_txrx_thermal_unpause(pdev);
- cur_level = pdev->tx_throttle.current_throttle_level;
- cur_phase = pdev->tx_throttle.current_throttle_phase;
- ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
- if (pdev->tx_throttle.current_throttle_level !=
- THROTTLE_LEVEL_0) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "start timer %d ms\n",
- ms);
- cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer,
- ms);
- }
- }
- }
- #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/**
 * ol_tx_pdev_throttle_tx_timer() - legacy flow-control tx timer callback
 * @context: opaque timer context; the struct ol_txrx_pdev_t being throttled
 *
 * Simply forwards to ol_tx_pdev_ll_pause_queue_send_all() for the pdev.
 *
 * Return: none
 */
void ol_tx_pdev_throttle_tx_timer(void *context)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
	ol_tx_pdev_ll_pause_queue_send_all(pdev);
}
- #endif
- void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
- {
- int ms = 0;
- if (level >= THROTTLE_LEVEL_MAX) {
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
- "%s invalid throttle level set %d, ignoring\n",
- __func__, level);
- return;
- }
- TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Setting throttle level %d\n", level);
- /* Set the current throttle level */
- pdev->tx_throttle.current_throttle_level = (enum throttle_level) level;
- /* Reset the phase */
- pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
- ol_txrx_thermal_unpause(pdev);
- /* Start with the new time */
- ms = pdev->tx_throttle.
- throttle_time_ms[level][THROTTLE_PHASE_OFF];
- cdf_softirq_timer_cancel(&pdev->tx_throttle.phase_timer);
- if (level != THROTTLE_LEVEL_0)
- cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer, ms);
- }
/*
 * Duty-cycle table: right-shift applied to the duty period to obtain the
 * "on" time for each throttle level.
 * Example: "on" time for level 2 with a 100 ms duty period is
 *   duty_period_ms >> g_throttle_duty_cycle_table[2] = 100 >> 2 = 25 ms.
 */
static uint8_t g_throttle_duty_cycle_table[THROTTLE_LEVEL_MAX] = { 0, 1, 2, 4 };
- void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period)
- {
- int i;
- /* Set the current throttle level */
- pdev->tx_throttle.throttle_period_ms = period;
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "level OFF ON\n");
- for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
- pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
- pdev->tx_throttle.throttle_period_ms >>
- g_throttle_duty_cycle_table[i];
- pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
- pdev->tx_throttle.throttle_period_ms -
- pdev->tx_throttle.throttle_time_ms[
- i][THROTTLE_PHASE_ON];
- TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%d %d %d\n", i,
- pdev->tx_throttle.
- throttle_time_ms[i][THROTTLE_PHASE_OFF],
- pdev->tx_throttle.
- throttle_time_ms[i][THROTTLE_PHASE_ON]);
- }
- }
/**
 * ol_tx_throttle_init() - initialize the pdev's thermal throttle state
 * @pdev: pdev handle
 *
 * Starts at THROTTLE_LEVEL_0 (no throttling) in the OFF phase, derives
 * the per-level on/off durations from the configured throttle period,
 * and sets up the phase timer (plus the legacy flow-control tx timer
 * when QCA_LL_LEGACY_TX_FLOW_CONTROL is enabled).
 *
 * Return: none
 */
void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
{
	uint32_t throttle_period;

	/* no throttling until a level is explicitly set */
	pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
	cdf_spinlock_init(&pdev->tx_throttle.mutex);

	/* duty period comes from the control-plane configuration */
	throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);
	ol_tx_throttle_init_period(pdev, throttle_period);

	cdf_softirq_timer_init(pdev->osdev,
			       &pdev->tx_throttle.phase_timer,
			       ol_tx_pdev_throttle_phase_timer, pdev,
			       CDF_TIMER_TYPE_SW);
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
	cdf_softirq_timer_init(pdev->osdev,
			       &pdev->tx_throttle.tx_timer,
			       ol_tx_pdev_throttle_tx_timer, pdev,
			       CDF_TIMER_TYPE_SW);
#endif
	pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
}
- #endif /* QCA_SUPPORT_TX_THROTTLE */
- /*--- End of LL tx throttle queue code ---------------------------------------*/
|