@@ -118,6 +118,78 @@ ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 /* thresh for peer's cached buf queue beyond which the elements are dropped */
 #define OL_TXRX_CACHED_BUFQ_THRESH 128
 
+#ifdef DP_SUPPORT_RECOVERY_NOTIFY
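+/**
+ * ol_peer_recovery_notifier_cb() - notifier callback invoked on a hang event
+ * @block: notifier block registered via qdf_hang_event_register_notifier()
+ * @state: notifier state (unused)
+ * @data: struct qdf_notifer_data carrying the hang event buffer and offset
+ *
+ * Appends a peer_hang_data TLV (peer MAC address plus a bitmask of the peer
+ * debug IDs whose access_list count is non-zero) to the hang event buffer.
+ *
+ * Return: 0 on success, -EINVAL on invalid input, NOTIFY_STOP_MASK if the
+ *	   buffer offset has reached QDF_WLAN_MAX_HOST_OFFSET
+ */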
+static
+int ol_peer_recovery_notifier_cb(struct notifier_block *block,
+				 unsigned long state, void *data)
+{
+	struct qdf_notifer_data *notif_data = data;
+	qdf_notif_block *notif_block;
+	struct ol_txrx_peer_t *peer;
+	struct peer_hang_data hang_data;
+	enum peer_debug_id_type dbg_id;
+
+	if (!data || !block)
+		return -EINVAL;
+
+	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
+
+	peer = notif_block->priv_data;
+	if (!peer)
+		return -EINVAL;
+
+	if (notif_data->offset >= QDF_WLAN_MAX_HOST_OFFSET)
+		return NOTIFY_STOP_MASK;
+
+	hang_data.peer_timeout_bitmask = 0;
+	QDF_HANG_EVT_SET_HDR(&hang_data.tlv_header,
+			     HANG_EVT_TAG_DP_PEER_INFO,
+			     QDF_HANG_GET_STRUCT_TLVLEN(struct peer_hang_data));
+
+	qdf_mem_copy(&hang_data.peer_mac_addr, &peer->mac_addr.raw,
+		     QDF_MAC_ADDR_SIZE);
+
+	for (dbg_id = 0; dbg_id < PEER_DEBUG_ID_MAX; dbg_id++)
+		if (qdf_atomic_read(&peer->access_list[dbg_id]))
+			hang_data.peer_timeout_bitmask |= (1 << dbg_id);
+
+	qdf_mem_copy(notif_data->hang_data + notif_data->offset,
+		     &hang_data, sizeof(struct peer_hang_data));
+	notif_data->offset += sizeof(struct peer_hang_data);
+
+	return 0;
+}
+
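+/*
+ * Single shared notifier block: ol_register_peer_recovery_notifier() points
+ * priv_data at the most recently registered peer.
+ */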
+static qdf_notif_block ol_peer_recovery_notifier = {
+	.notif_block.notifier_call = ol_peer_recovery_notifier_cb,
+};
+
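+/**
+ * ol_register_peer_recovery_notifier() - register the peer hang event notifier
+ * @peer: peer to be reported in the hang event data
+ *
+ * Return: QDF_STATUS from qdf_hang_event_register_notifier()
+ */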
+static
+QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
+{
+	ol_peer_recovery_notifier.priv_data = peer;
+
+	return qdf_hang_event_register_notifier(&ol_peer_recovery_notifier);
+}
+
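+/**
+ * ol_unregister_peer_recovery_notifier() - unregister the peer hang event
+ *					     notifier
+ *
+ * Return: QDF_STATUS from qdf_hang_event_unregister_notifier()
+ */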
+static
+QDF_STATUS ol_unregister_peer_recovery_notifier(void)
+{
+	return qdf_hang_event_unregister_notifier(&ol_peer_recovery_notifier);
+}
+#else
+static inline
+QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline
+QDF_STATUS ol_unregister_peer_recovery_notifier(void)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
 /**
  * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
  * fw is compatible for marking first packet after wow wakeup
@@ -1755,6 +1827,7 @@ static QDF_STATUS ol_txrx_pdev_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id
 	ol_txrx_pdev_grp_stat_destroy(pdev);
 
 	ol_txrx_debugfs_exit(pdev);
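+	/* unregister the peer recovery/hang event notifier before the pdev is freed */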
+	ol_unregister_peer_recovery_notifier();
 
 	soc->pdev_list[pdev->id] = NULL;
 	qdf_mem_free(pdev);
@@ -3345,64 +3418,6 @@ ol_txrx_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 	return status;
 }
 
-#ifdef DP_SUPPORT_RECOVERY_NOTIFY
-static
-int ol_peer_recovery_notifier_cb(struct notifier_block *block,
-				 unsigned long state, void *data)
-{
-	struct qdf_notifer_data *notif_data = data;
-	qdf_notif_block *notif_block;
-	struct ol_txrx_peer_t *peer;
-	struct peer_hang_data hang_data;
-	enum peer_debug_id_type dbg_id;
-
-	if (!data || !block)
-		return -EINVAL;
-
-	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
-
-	peer = notif_block->priv_data;
-	if (!peer)
-		return -EINVAL;
-
-	hang_data.peer_timeout_bitmask = 0;
-	QDF_HANG_EVT_SET_HDR(&hang_data.tlv_header,
-			     HANG_EVT_TAG_DP_PEER_INFO,
-			     QDF_HANG_GET_STRUCT_TLVLEN(struct peer_hang_data));
-
-	qdf_mem_copy(&hang_data.peer_mac_addr, &peer->mac_addr.raw,
-		     QDF_MAC_ADDR_SIZE);
-
-	for (dbg_id = 0; dbg_id < PEER_DEBUG_ID_MAX; dbg_id++)
-		if (qdf_atomic_read(&peer->access_list[dbg_id]))
-			hang_data.peer_timeout_bitmask |= (1 << dbg_id);
-
-	qdf_mem_copy(notif_data->hang_data + notif_data->offset,
-		     &hang_data, sizeof(struct peer_hang_data));
-	notif_data->offset += sizeof(struct peer_hang_data);
-
-	return 0;
-}
-
-static qdf_notif_block ol_peer_recovery_notifier = {
-	.notif_block.notifier_call = ol_peer_recovery_notifier_cb,
-};
-
-static
-QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
-{
-	ol_peer_recovery_notifier.priv_data = peer;
-
-	return qdf_hang_event_register_notifier(&ol_peer_recovery_notifier);
-}
-#else
-static inline
-QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
-{
-	return QDF_STATUS_SUCCESS;
-}
-#endif
-
 /**
  * peer_unmap_timer_handler() - peer unmap timer function
  * @data: peer object pointer