@@ -118,6 +118,7 @@ static void
 ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+
 	if (bss_addr && vdev->last_real_peer &&
 	    !qdf_mem_cmp((u8 *)bss_addr,
 			 vdev->last_real_peer->mac_addr.raw,
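Reviewer note: qdf_mem_cmp() follows memcmp() semantics (zero means equal), so the negated call above reads "the BSS address matches the cached peer's MAC". A minimal standalone sketch of that guarded-compare pattern, using plain libc rather than the QDF wrappers (the names below are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAC_ADDR_SIZE 6

/* Sketch: true only when both pointers are valid AND the MACs match.
 * Both guards must pass before the compare dereferences anything.
 */
static bool mac_matches_last_peer(const uint8_t *bss_addr,
				  const uint8_t *last_peer_mac)
{
	return bss_addr && last_peer_mac &&
	       !memcmp(bss_addr, last_peer_mac, MAC_ADDR_SIZE);
}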
@@ -142,16 +143,17 @@ ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	ol_txrx_peer_handle peer;
+
 	if (vdev->last_real_peer == NULL) {
-			peer = NULL;
-			peer = ol_txrx_find_peer_by_addr(
-				(struct cdp_pdev *)pdev,
-				vdev->hl_tdls_ap_mac_addr.raw,
-				peer_id);
-			if (peer && (peer->peer_ids[0] !=
-				HTT_INVALID_PEER_ID))
-				vdev->last_real_peer = peer;
-		}
+		peer = NULL;
+		peer = ol_txrx_find_peer_by_addr(
+			(struct cdp_pdev *)pdev,
+			vdev->hl_tdls_ap_mac_addr.raw,
+			peer_id);
+		if (peer && (peer->peer_ids[0] !=
+		    HTT_INVALID_PEER_ID))
+			vdev->last_real_peer = peer;
+	}
 }
 
 /**
@@ -165,6 +167,7 @@ is_vdev_restore_last_peer(void *ppeer)
 {
 	struct ol_txrx_peer_t *peer = ppeer;
 	struct ol_txrx_vdev_t *vdev;
+
 	vdev = peer->vdev;
 	return vdev->last_real_peer && (vdev->last_real_peer == peer);
 }
@@ -185,6 +188,7 @@ ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
 	struct ol_txrx_peer_t *peer = ppeer;
 	struct ol_txrx_vdev_t *vdev;
+
 	vdev = peer->vdev;
 	if (restore_last_peer && (vdev->last_real_peer == NULL)) {
 		peer = NULL;
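Reviewer note: both of these hunks touch the same TDLS cache-restore pattern: look the peer up by the cached AP MAC, and only publish it as last_real_peer once its first peer ID is valid. A self-contained sketch of that pattern (the stubbed lookup stands in for ol_txrx_find_peer_by_addr(); the HTT_INVALID_PEER_ID value here is a placeholder, not the driver's):

#include <stddef.h>
#include <stdint.h>

#define HTT_INVALID_PEER_ID 0xffff	/* placeholder value for the sketch */

struct peer { uint16_t peer_ids[1]; };

/* Stand-in for ol_txrx_find_peer_by_addr(); always misses in this sketch. */
static struct peer *find_peer_by_addr(const uint8_t *mac, uint8_t *peer_id)
{
	(void)mac; (void)peer_id;
	return NULL;
}

/* Only return a peer that is safe to cache: found AND has a valid ID. */
static struct peer *restore_last_peer(const uint8_t *ap_mac, uint8_t *peer_id)
{
	struct peer *peer = find_peer_by_addr(ap_mac, peer_id);

	if (peer && peer->peer_ids[0] != HTT_INVALID_PEER_ID)
		return peer;	/* safe to cache as last_real_peer */
	return NULL;
}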
@@ -268,12 +272,12 @@ ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev)
 void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
 {
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
 	if (!pdev) {
 		qdf_print("%s: pdev is NULL\n", __func__);
 		return;
 	}
 	pdev->is_mgmt_over_wmi_enabled = value;
-	return;
 }
 
 /**
@@ -284,6 +288,7 @@ void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
 uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
 {
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
 	if (!pdev) {
 		qdf_print("%s: pdev is NULL\n", __func__);
 		return 0;
@@ -396,6 +401,7 @@ ol_txrx_peer_find_by_local_id(struct cdp_pdev *ppdev,
 {
 	struct ol_txrx_peer_t *peer;
 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+
 	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
 	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
 		return NULL;
@@ -452,6 +458,7 @@ ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
 			   struct ol_txrx_peer_t *peer)
 {
 	int i = peer->local_id;
+
 	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
 	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
 		return;
@@ -522,6 +529,7 @@ void ol_txrx_update_tx_queue_groups(
 	u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
 	u_int32_t membership;
 	struct ol_txrx_vdev_t *vdev;
+
 	group = &pdev->txq_grps[group_id];
 
 	membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
@@ -613,6 +621,7 @@ static inline void setup_fastpath_ce_handles(struct hif_opaque_softc *osc,
 static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
 {
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
 	if (!pdev) {
 		qdf_print("%s: pdev is NULL\n", __func__);
 		return;
@@ -622,7 +631,6 @@ static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
 	pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
 	ol_txrx_info("Global pool size: %d\n",
 		pdev->num_msdu_desc);
-	return;
 }
 
 /**
@@ -702,11 +710,12 @@ static inline uint16_t
 ol_txrx_rsrc_threshold_lo(int desc_pool_size)
 {
 	int threshold_low;
+
 	/*
-	* 5% margin of unallocated desc is too much for per
-	* vdev mechanism.
-	* Define the value seperately.
-	*/
+	 * 5% margin of unallocated desc is too much for per
+	 * vdev mechanism.
+	 * Define the value separately.
+	 */
 	threshold_low = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
 
 	return threshold_low;
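Reviewer note: the comment contrasts two ways to pick a flow-control low watermark: a percentage margin of the descriptor pool versus a fixed reserve. A hedged sketch of both, with an invented constant (the real value of TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED lives in the driver headers):

#include <stdint.h>

/* Placeholder for the driver's TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED. */
#define MGMT_RESERVED 100

/* Percentage-based low watermark: keep 5% of the pool unallocated. */
static uint16_t threshold_lo_pct(int desc_pool_size)
{
	return (uint16_t)(desc_pool_size * 5 / 100);
}

/* Fixed-reserve low watermark, as the hunk above prefers for the
 * per-vdev mechanism: a constant number of descriptors held back
 * for management frames, independent of pool size.
 */
static uint16_t threshold_lo_fixed(void)
{
	return MGMT_RESERVED;
}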
@@ -791,13 +800,11 @@ ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
 static inline void
 ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev)
 {
-	return;
 }
 
 static inline void
 ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev)
 {
-	return;
 }
 
 
@@ -835,13 +842,11 @@ ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
 static inline void
 ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev)
 {
-	return;
 }
 
 static inline void
 ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev)
 {
-	return;
 }
 #endif
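Reviewer note: these hunks drop bare "return;" statements from empty inline stubs, which checkpatch flags as redundant in void functions. The surrounding pattern, sketched with an invented FEATURE_FOO guard:

struct foo_ctx;	/* opaque to callers */

#ifdef FEATURE_FOO
void foo_stats_init(struct foo_ctx *ctx);	/* real implementation elsewhere */
#else
/* Feature compiled out: an empty inline stub keeps call sites free of
 * #ifdefs. The body is simply empty; no "return;" is needed.
 */
static inline void foo_stats_init(struct foo_ctx *ctx)
{
}
#endif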
@@ -874,6 +879,7 @@ static void
 ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
 {
 	u_int8_t i;
+
 	for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
 		TAILQ_INIT(&vdev->txqs[i].head);
 		vdev->txqs[i].paused_count.total = 0;
@@ -920,6 +926,7 @@ ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
 {
 	uint8_t i;
 	struct ol_txrx_vdev_t *vdev = peer->vdev;
+
 	qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
 	for (i = 0; i < OL_TX_NUM_TIDS; i++) {
 		TAILQ_INIT(&peer->txqs[i].head);
@@ -965,27 +972,23 @@ ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
 static inline void
 ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev)
 {
-	return;
 }
 
 static inline void
 ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev)
 {
-	return;
 }
 
 static inline void
 ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
 		       struct ol_txrx_peer_t *peer)
 {
-	return;
 }
 
 static inline void
 ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
 			   struct ol_txrx_peer_t *peer)
 {
-	return;
 }
 #endif
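Reviewer note: the init hunks above loop TAILQ_INIT over per-vdev and per-peer tx queues. A compilable sketch of that BSD-queue idiom, with invented names and queue count:

#include <sys/queue.h>

#define NUM_QUEUES 4

struct tx_frame {
	TAILQ_ENTRY(tx_frame) entry;	/* links this frame into a queue */
};

struct tx_queue {
	TAILQ_HEAD(, tx_frame) head;	/* queue of pending frames */
	int paused_count;
};

/* Mirror of the init loops above: every queue head must be
 * TAILQ_INIT'ed before the first insert, and counters start at zero.
 */
static void txqs_init(struct tx_queue *txqs)
{
	int i;

	for (i = 0; i < NUM_QUEUES; i++) {
		TAILQ_INIT(&txqs[i].head);
		txqs[i].paused_count = 0;
	}
}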
@@ -1366,8 +1369,10 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 			goto desc_alloc_fail;
 		}
 		if (!ret && htt_frag_desc) {
-			/* Initialize the first 6 words (TSO flags)
-			   of the frag descriptor */
+			/*
+			 * Initialize the first 6 words (TSO flags)
+			 * of the frag descriptor
+			 */
 			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
 			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
 			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
@@ -1433,8 +1438,10 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 	pdev->sw_pf_proc_enable = 1;
 
 	if (pdev->frame_format == wlan_frm_fmt_802_3) {
-		/* sw llc process is only needed in
-		   802.3 to 802.11 transform case */
+		/*
+		 * sw llc process is only needed in the
+		 * 802.3 to 802.11 transform case
+		 */
 		pdev->sw_tx_llc_proc_enable = 1;
 		pdev->sw_rx_llc_proc_enable = 1;
 	} else {
@@ -1498,8 +1505,10 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 	 * the rx->tx forwarding.
 	 */
 	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
-		/* PN check, rx-tx forwarding and rx reorder is done by
-		   the target */
+		/*
+		 * PN check, rx-tx forwarding and rx reorder are done by
+		 * the target
+		 */
 		if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
 			pdev->rx_opt_proc = ol_rx_in_order_deliver;
 		else
@@ -1539,8 +1548,10 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 		 */
 		pdev->rx_opt_proc = ol_rx_fwd_check;
 	} else {
-		/* rx->tx forwarding either done in target,
-		 * or not done at all */
+		/*
+		 * rx->tx forwarding is either done in the target,
+		 * or not done at all
+		 */
 		pdev->rx_opt_proc = ol_rx_deliver;
 	}
 }
@@ -1604,12 +1615,12 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 	/* avg = 100% * new + 0% * old */ \
 	(1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
 /*
-#else
-#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
-	//avg = 25% * new + 25% * old
-	(1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
-#endif
-*/
+ * #else
+ * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
+ *	//avg = 25% * new + 25% * old
+ *	(1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
+ * #endif
+ */
 	pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
 	pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
 #endif
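Reviewer note: this macro pair sets up a shift-based exponential moving average, avg' = (w*new + ((1<<shift) - w)*avg) >> shift. With w = 1<<shift the new sample fully replaces the old; the commented-out alternative w = 1<<(shift-2) actually weights 25% new + 75% old (the "25% * new + 25% * old" note in the dead code does not add up). A sketch of the update, independent of the driver's types and exact update line:

#include <stdint.h>

#define RSSI_UPDATE_SHIFT 3				/* denominator = 1 << 3 = 8 */
#define RSSI_NEW_WEIGHT (1 << RSSI_UPDATE_SHIFT)	/* 100% new, 0% old */

/* Shift-based EMA: avg' = (w*new + (8-w)*avg) / 8 for shift == 3.
 * With w == 8 the average just tracks the newest sample; w == 2
 * would give 25% new + 75% old.
 */
static int16_t rssi_ema(int16_t avg, int16_t sample)
{
	int32_t w = RSSI_NEW_WEIGHT;
	int32_t denom = 1 << RSSI_UPDATE_SHIFT;

	return (int16_t)((w * sample + (denom - w) * avg) >>
			 RSSI_UPDATE_SHIFT);
}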
@@ -1630,6 +1641,7 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
 	{
 		uint32_t bin_width_1000ticks;
+
 		bin_width_1000ticks =
 			qdf_system_msecs_to_ticks
 				(QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
@@ -1918,7 +1930,6 @@ ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
 static inline void
 ol_txrx_vdev_tx_desc_cnt_init(struct ol_txrx_vdev_t *vdev)
 {
-	return;
 }
 #endif
@@ -2069,7 +2080,6 @@ static void ol_txrx_vdev_register(struct cdp_vdev *pvdev,
  */
 void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
 {
-	return;
 }
 #endif
@@ -2171,6 +2181,7 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
 	vdev->ll_pause.is_q_timer_on = false;
 	while (vdev->ll_pause.txq.head) {
 		qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
+
 		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
 		qdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
 			       QDF_DMA_TO_DEVICE);
@@ -2223,15 +2234,15 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
 	htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
 
 	/*
-	* The ol_tx_desc_free might access the invalid content of vdev referred
-	* by tx desc, since this vdev might be detached in another thread
-	* asynchronous.
-	*
-	* Go through tx desc pool to set corresponding tx desc's vdev to NULL
-	* when detach this vdev, and add vdev checking in the ol_tx_desc_free
-	* to avoid crash.
-	*
-	*/
+	 * ol_tx_desc_free might access invalid content of a vdev referred
+	 * to by a tx desc, since the vdev might be detached asynchronously
+	 * in another thread.
+	 *
+	 * Go through the tx desc pool and set each tx desc's vdev to NULL
+	 * when detaching this vdev, and add a vdev check in
+	 * ol_tx_desc_free to avoid a crash.
+	 *
+	 */
 	ol_txrx_tx_desc_reset_vdev(vdev);
 
 	/*
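Reviewer note: the comment describes a use-after-free guard with two halves: before the vdev is freed, sweep the descriptor pool and clear every stale back-pointer, and make the free path tolerate NULL. A minimal sketch of both halves (the pool layout is invented):

#include <stddef.h>

struct vdev;

struct tx_desc {
	struct vdev *vdev;	/* back-pointer that can go stale */
};

/* Half 1: on vdev detach, clear every descriptor still pointing at it. */
static void tx_desc_reset_vdev(struct tx_desc *pool, size_t n,
			       struct vdev *dying)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (pool[i].vdev == dying)
			pool[i].vdev = NULL;
}

/* Half 2: the free path checks for NULL instead of dereferencing. */
static void tx_desc_free(struct tx_desc *desc)
{
	if (desc->vdev) {
		/* per-vdev accounting would happen here */
	}
	/* return desc to the free list ... */
}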
@@ -2756,8 +2767,8 @@ ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
 				   "%s: self peer removed by caller ",
 				   __func__);
 			break;
-		} else
-			temp = peer;
+		}
+		temp = peer;
 	}
 
 	qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
@@ -3029,6 +3040,7 @@ ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
 {
 
 	struct ol_txrx_peer_t *peer;
+
 	peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
 	if (peer)
 		return peer->uapsd_mask;
@@ -3154,27 +3166,26 @@ int ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 
 			/*
-			* The ol_tx_desc_free might access the invalid content of vdev
-			* referred by tx desc, since this vdev might be detached in
-			* another thread asynchronous.
-			*
-			* Go through tx desc pool to set corresponding tx desc's vdev
-			* to NULL when detach this vdev, and add vdev checking in the
-			* ol_tx_desc_free to avoid crash.
-			*
-			*/
+			 * ol_tx_desc_free might access invalid
+			 * content of a vdev referred to by a tx
+			 * desc, since the vdev might be detached
+			 * asynchronously in another thread.
+			 *
+			 * Go through the tx desc pool and set each
+			 * desc's vdev to NULL when detaching this
+			 * vdev, and add a vdev check in
+			 * ol_tx_desc_free to avoid a crash.
+			 */
 			ol_txrx_tx_desc_reset_vdev(vdev);
 			ol_txrx_dbg(
-				   "%s: deleting vdev object %p "
-				   "(%02x:%02x:%02x:%02x:%02x:%02x)"
-				   " - its last peer is done",
-				   __func__, vdev,
-				   vdev->mac_addr.raw[0],
-				   vdev->mac_addr.raw[1],
-				   vdev->mac_addr.raw[2],
-				   vdev->mac_addr.raw[3],
-				   vdev->mac_addr.raw[4],
-				   vdev->mac_addr.raw[5]);
+				   "%s: deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
+				   __func__, vdev,
+				   vdev->mac_addr.raw[0],
+				   vdev->mac_addr.raw[1],
+				   vdev->mac_addr.raw[2],
+				   vdev->mac_addr.raw[3],
+				   vdev->mac_addr.raw[4],
+				   vdev->mac_addr.raw[5]);
 			/* all peers are gone, go ahead and delete it */
 			qdf_mem_free(vdev);
 			if (vdev_delete_cb)
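Reviewer note: folding the format string onto one line follows the kernel rule that user-visible strings are never split, even past 80 columns. MAC formatting helpers keep such lines manageable; QDF provides QDF_MAC_ADDR_STR/QDF_MAC_ADDR_ARRAY in some branches (an assumption worth checking before relying on it). A self-contained sketch with local stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for QDF's MAC formatting helpers. */
#define MAC_STR "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_ARR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]

static void log_vdev_delete(const void *vdev, const uint8_t *mac)
{
	/* One unbroken format string, per checkpatch; the helpers keep
	 * the call site short despite the six byte arguments.
	 */
	printf("deleting vdev object %p (" MAC_STR ") - its last peer is done\n",
	       vdev, MAC_ARR(mac));
}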
@@ -3374,6 +3385,7 @@ ol_txrx_peer_handle
 ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
 {
 	struct ol_txrx_peer_t *peer;
+
 	peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
 	if (peer) {
 		ol_txrx_info(
@@ -3406,7 +3418,6 @@ static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
 		  "total tx credit %d num_free %d",
 		  total, num_free);
 
-	return;
 }
 
 /**
@@ -3535,9 +3546,11 @@ int ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
 void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
 {
 	ol_tx_desc_list tx_descs;
-	/* First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
+	/*
+	 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
 	 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
-	 * which is tha same with normal data send complete path*/
+	 * which is the same as the normal data send complete path
+	 */
 	htt_tx_pending_discard(pdev_handle->htt_pdev);
 
 	TAILQ_INIT(&tx_descs);
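Reviewer note: the rewrapped comment describes a two-owner reference count on each tx descriptor: HIF and HTT each drop one reference, and whoever reaches zero frees the descriptor, the same ordering as the normal send-complete path. A sketch with C11 atomics (the driver's qdf_atomic_dec_and_test() has the same test-at-zero semantics):

#include <stdatomic.h>
#include <stdbool.h>

struct tx_desc {
	atomic_int ref_cnt;	/* one ref per owner (e.g. HIF and HTT) */
};

/* Returns true when this caller dropped the last reference and must
 * free the descriptor -- mirrors qdf_atomic_dec_and_test().
 */
static bool tx_desc_put(struct tx_desc *desc)
{
	return atomic_fetch_sub(&desc->ref_cnt, 1) == 1;
}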
@@ -3573,6 +3586,7 @@ ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
 		     uint8_t cfg_stats_type, uint32_t cfg_val)
 {
 	uint64_t dummy_cookie = 0;
+
 	htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
 			      0 /* reset mask */,
 			      cfg_stats_type, cfg_val, dummy_cookie);
@@ -3716,14 +3730,16 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 			/* TO DO: add copying to the requestor's buf */
 
 		case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
-			bytes = sizeof(struct rx_remote_buffer_mgmt_stats);
+			bytes = sizeof(struct
+					rx_remote_buffer_mgmt_stats);
 			if (req->base.copy.buf) {
 				int limit;
 
-				limit = sizeof(struct rx_remote_buffer_mgmt_stats);
-				if (req->base.copy.byte_limit < limit) {
-					limit = req->base.copy.byte_limit;
-				}
+				limit = sizeof(struct
+						rx_remote_buffer_mgmt_stats);
+				if (req->base.copy.byte_limit < limit)
+					limit = req->base.copy.
+						byte_limit;
 				buf = req->base.copy.buf + req->offset;
 				qdf_mem_copy(buf, stats_data, limit);
 			}
@@ -3734,9 +3750,11 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 			if (req->base.copy.buf) {
 				int limit;
 
-				limit = sizeof(struct wlan_dbg_txbf_data_stats);
+				limit = sizeof(struct
+						wlan_dbg_txbf_data_stats);
 				if (req->base.copy.byte_limit < limit)
-					limit = req->base.copy.byte_limit;
+					limit = req->base.copy.
+						byte_limit;
 				buf = req->base.copy.buf + req->offset;
 				qdf_mem_copy(buf, stats_data, limit);
 			}
@@ -3747,22 +3765,27 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 			if (req->base.copy.buf) {
 				int limit;
 
-				limit = sizeof(struct wlan_dbg_txbf_snd_stats);
+				limit = sizeof(struct
+						wlan_dbg_txbf_snd_stats);
 				if (req->base.copy.byte_limit < limit)
-					limit = req->base.copy.byte_limit;
+					limit = req->base.copy.
+						byte_limit;
 				buf = req->base.copy.buf + req->offset;
 				qdf_mem_copy(buf, stats_data, limit);
 			}
 			break;
 
 		case HTT_DBG_STATS_TX_SELFGEN_INFO:
-			bytes = sizeof(struct wlan_dbg_tx_selfgen_stats);
+			bytes = sizeof(struct
+					wlan_dbg_tx_selfgen_stats);
 			if (req->base.copy.buf) {
 				int limit;
 
-				limit = sizeof(struct wlan_dbg_tx_selfgen_stats);
+				limit = sizeof(struct
+						wlan_dbg_tx_selfgen_stats);
 				if (req->base.copy.byte_limit < limit)
-					limit = req->base.copy.byte_limit;
+					limit = req->base.copy.
+						byte_limit;
 				buf = req->base.copy.buf + req->offset;
 				qdf_mem_copy(buf, stats_data, limit);
 			}
@@ -3774,10 +3797,11 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 			if (req->base.copy.buf) {
 				int limit;
 
-				limit =
-				sizeof(struct wlan_dbg_wifi2_error_stats);
+				limit = sizeof(struct
+						wlan_dbg_wifi2_error_stats);
 				if (req->base.copy.byte_limit < limit)
-					limit = req->base.copy.byte_limit;
+					limit = req->base.copy.
+						byte_limit;
 				buf = req->base.copy.buf + req->offset;
 				qdf_mem_copy(buf, stats_data, limit);
 			}
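Reviewer note: all of these stats cases repeat one pattern: clamp the copy length to the smaller of the record size and the requester's byte budget before qdf_mem_copy(). Distilled into a helper (field names are illustrative; the driver keeps the logic inline):

#include <stddef.h>
#include <string.h>

/* Copy at most buf_limit bytes of a record into the caller's buffer;
 * returns how many bytes were copied. This is the clamp-then-copy
 * pattern each HTT_DBG_STATS_* case above repeats.
 */
static size_t copy_stats_record(void *dst, size_t buf_limit,
				const void *record, size_t record_size)
{
	size_t limit = record_size < buf_limit ? record_size : buf_limit;

	memcpy(dst, record, limit);
	return limit;
}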
|
@@ -3802,9 +3826,8 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
|
|
|
default:
|
|
|
break;
|
|
|
}
|
|
|
- buf = req->base.copy.buf
|
|
|
- ? req->base.copy.buf
|
|
|
- : stats_data;
|
|
|
+ buf = req->base.copy.buf ?
|
|
|
+ req->base.copy.buf : stats_data;
|
|
|
if (req->base.callback.fp)
|
|
|
req->base.callback.fp(req->base.callback.ctxt,
|
|
|
type, buf, bytes);
|
|
@@ -3830,9 +3853,8 @@ int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
|
|
|
"The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
|
|
|
#endif
|
|
|
}
|
|
|
- if (debug_specs & TXRX_DBG_MASK_STATS) {
|
|
|
+ if (debug_specs & TXRX_DBG_MASK_STATS)
|
|
|
ol_txrx_stats_display(vdev->pdev);
|
|
|
- }
|
|
|
if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
|
|
|
#if defined(ENABLE_TXRX_PROT_ANALYZE)
|
|
|
ol_txrx_prot_ans_display(vdev->pdev);
|
|
@@ -3931,13 +3953,14 @@ void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
|
|
|
* Return: length of string
|
|
|
*/
|
|
|
static int
|
|
|
-ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len)
|
|
|
+ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
|
|
|
{
|
|
|
uint32_t len = 0;
|
|
|
|
|
|
struct ol_txrx_vdev_t *vdev =
|
|
|
(struct ol_txrx_vdev_t *)
|
|
|
ol_txrx_get_vdev_from_vdev_id(vdev_id);
|
|
|
+
|
|
|
if (!vdev) {
|
|
|
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
|
|
"%s: vdev is NULL", __func__);
|
|
@@ -3974,10 +3997,7 @@ void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
|
|
|
+ pdev->stats.pub.tx.dropped.target_discard.bytes
|
|
|
+ pdev->stats.pub.tx.dropped.no_ack.bytes);
|
|
|
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
|
|
- "successfully delivered: %lld (%lld B), "
|
|
|
- "download fail: %lld (%lld B), "
|
|
|
- "target discard: %lld (%lld B), "
|
|
|
- "no ack: %lld (%lld B)",
|
|
|
+ "successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B)",
|
|
|
pdev->stats.pub.tx.delivered.pkts,
|
|
|
pdev->stats.pub.tx.delivered.bytes,
|
|
|
pdev->stats.pub.tx.dropped.download_fail.pkts,
|
|
@@ -4168,6 +4188,7 @@ static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
 {
 	struct ol_txrx_vdev_t *vdev =
 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
 	if (NULL == vdev) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
@@ -4182,7 +4203,8 @@ static int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
 }
 
 /**
- * ol_txrx_de_register_tx_flow_control_cb() - deregister tx flow control callback
+ * ol_txrx_de_register_tx_flow_control_cb() - deregister tx flow control
+ * callback
 * @vdev_id: vdev_id
 *
 * Return: 0 for success or error code
@@ -4191,6 +4213,7 @@ static int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
 {
 	struct ol_txrx_vdev_t *vdev =
 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
 	if (NULL == vdev) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: Invalid vdev_id", __func__);
@@ -4218,6 +4241,7 @@ ol_txrx_get_tx_resource(uint8_t sta_id,
 			unsigned int high_watermark_offset)
 {
 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
+
 	if (NULL == vdev) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: Invalid sta_id %d", __func__, sta_id);
@@ -4257,6 +4281,7 @@ ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
 {
 	struct ol_txrx_vdev_t *vdev =
 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
 	if (NULL == vdev) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
@@ -4533,7 +4558,6 @@ static void ol_txrx_clear_stats(uint16_t value)
 		break;
 	}
 
-	return;
 }
 
 /**
@@ -4621,9 +4645,11 @@ free_buf:
 void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 			qdf_nbuf_t rx_buf_list)
 {
-	/* Firmware data path active response will use shim RX thread
+	/*
+	 * Firmware data path active response will use shim RX thread
 	 * T2H MSG running on SIRQ context,
-	 * IPA kernel module API should not be called on SIRQ CTXT */
+	 * IPA kernel module API should not be called on SIRQ CTXT
+	 */
 	qdf_nbuf_t buf, next_buf;
 	ol_txrx_rx_fp data_rx = NULL;
 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
@@ -4864,6 +4890,7 @@ exit:
 static QDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
 {
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+
 	if (!pdev || !pause_cb) {
 		ol_txrx_err("pdev or pause_cb is NULL");
 		return QDF_STATUS_E_INVAL;
@@ -5028,7 +5055,6 @@ static int ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
 static void ol_txrx_soc_detach(void *soc)
 {
 	qdf_mem_free(soc);
-	return;
 }
 
 /**
|