/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef _DP_HTT_H_
#define _DP_HTT_H_

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_nbuf.h>
#include <htc_api.h>
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"

#ifdef HTT_LOGGER
#include "dp_htt_logger.h"
#else
struct htt_logger;

static inline
void htt_interface_logging_init(struct htt_logger **htt_logger_handle,
				struct cdp_ctrl_objmgr_psoc *ctrl_psoc)
{
}

static inline
void htt_interface_logging_deinit(struct htt_logger *htt_logger_handle)
{
}

static inline
int htt_command_record(struct htt_logger *h, uint8_t msg_type,
		       uint8_t *msg_data)
{
	return 0;
}

static inline
int htt_event_record(struct htt_logger *h, uint8_t msg_type,
		     uint8_t *msg_data)
{
	return 0;
}

static inline
int htt_wbm_event_record(struct htt_logger *h, uint8_t tx_status,
			 uint8_t *msg_data)
{
	return 0;
}
#endif

#define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
#define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE

#define HTT_SHIFT_UPPER_TIMESTAMP 32
#define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000

void htt_htc_pkt_pool_free(struct htt_soc *soc);

#define HTT_TX_MUTEX_TYPE qdf_spinlock_t

#define HTT_TX_MUTEX_INIT(_mutex) \
	qdf_spinlock_create(_mutex)

#define HTT_TX_MUTEX_ACQUIRE(_mutex) \
	qdf_spin_lock_bh(_mutex)

#define HTT_TX_MUTEX_RELEASE(_mutex) \
	qdf_spin_unlock_bh(_mutex)

#define HTT_TX_MUTEX_DESTROY(_mutex) \
	qdf_spinlock_destroy(_mutex)

#define DP_HTT_MAX_SEND_QUEUE_DEPTH 64

#ifndef HTT_MAC_ADDR_LEN
#define HTT_MAC_ADDR_LEN 6
#endif

#define HTT_FRAMECTRL_TYPE_MASK 0x0C
#define HTT_GET_FRAME_CTRL_TYPE(_val) \
	(((_val) & HTT_FRAMECTRL_TYPE_MASK) >> 2)
#define FRAME_CTRL_TYPE_MGMT	0x0
#define FRAME_CTRL_TYPE_CTRL	0x1
#define FRAME_CTRL_TYPE_DATA	0x2
#define FRAME_CTRL_TYPE_RESV	0x3

#define HTT_FRAMECTRL_DATATYPE 0x08
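
/*
 * Illustrative helper (a sketch, not part of the upstream API): shows how
 * HTT_GET_FRAME_CTRL_TYPE() pulls the 802.11 type bits out of the low byte
 * of the Frame Control field and compares them against the FRAME_CTRL_TYPE_*
 * codes defined above.
 */
static inline bool dp_htt_example_is_data_frame(uint8_t frame_ctrl_lo)
{
	return HTT_GET_FRAME_CTRL_TYPE(frame_ctrl_lo) == FRAME_CTRL_TYPE_DATA;
}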

#define HTT_PPDU_DESC_MAX_DEPTH 16
#define DP_SCAN_PEER_ID 0xFFFF

#define HTT_RX_DELBA_WIN_SIZE_M    0x0000FC00
#define HTT_RX_DELBA_WIN_SIZE_S    10

#define HTT_RX_DELBA_WIN_SIZE_GET(word) \
	(((word) & HTT_RX_DELBA_WIN_SIZE_M) >> HTT_RX_DELBA_WIN_SIZE_S)

/*
 * Set the base misclist size to the HTT copy engine source ring size
 * to guarantee that a packet on the misclist won't be freed while it
 * is sitting in the copy engine.
 */
#define DP_HTT_HTC_PKT_MISCLIST_SIZE 2048

#define HTT_T2H_MAX_MSG_SIZE 2048
#define HTT_T2H_EXT_STATS_TLV_START_OFFSET 3

/*
 * The offsets below are based on htt_ppdu_stats_common_tlv
 * defined in htt_ppdu_stats.h
 */
#define HTT_PPDU_STATS_COMMON_TLV_TLV_HDR_OFFSET 0
#define HTT_PPDU_STATS_COMMON_TLV_PPDU_ID_OFFSET 1
#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_SCH_CMD_ID_OFFSET 2
#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_FRM_TYPE_OFFSET 3
#define HTT_PPDU_STATS_COMMON_TLV_CHAIN_MASK_OFFSET 4
#define HTT_PPDU_STATS_COMMON_TLV_FES_DUR_US_OFFSET 5
#define HTT_PPDU_STATS_COMMON_TLV_SCH_EVAL_START_TSTMP_L32_US_OFFSET 6
#define HTT_PPDU_STATS_COMMON_TLV_SCH_END_TSTMP_US_OFFSET 7
#define HTT_PPDU_STATS_COMMON_TLV_START_TSTMP_L32_US_OFFSET 8
#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_PHY_MODE_OFFSET 9
#define HTT_PPDU_STATS_COMMON_TLV_CCA_DELTA_TIME_US_OFFSET 10
#define HTT_PPDU_STATS_COMMON_TLV_RXFRM_DELTA_TIME_US_OFFSET 11
#define HTT_PPDU_STATS_COMMON_TLV_TXFRM_DELTA_TIME_US_OFFSET 12
#define HTT_PPDU_STATS_COMMON_TLV_RESV_NUM_UL_BEAM_OFFSET 13
#define HTT_PPDU_STATS_COMMON_TLV_START_TSTMP_U32_US_OFFSET 14
#define HTT_PPDU_STATS_COMMON_TLV_BSSCOLOR_OBSS_PSR_OFFSET 15

/* get index for field in htt_ppdu_stats_common_tlv */
#define HTT_GET_STATS_CMN_INDEX(index) \
	HTT_PPDU_STATS_COMMON_TLV_##index##_OFFSET
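
/*
 * Illustrative sketch, not part of the upstream API: the *_OFFSET values
 * above are 32-bit word indices into htt_ppdu_stats_common_tlv, so a field
 * such as the PPDU id can be located in the raw TLV as shown. "tag_buf" is
 * assumed to point at the first word of the TLV; any further per-field
 * masking is elided.
 */
static inline uint32_t dp_htt_example_ppdu_id_word(uint32_t *tag_buf)
{
	return *(tag_buf + HTT_GET_STATS_CMN_INDEX(PPDU_ID));
}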

#define HTT_VDEV_STATS_TLV_SOC_DROP_CNT_OFFSET 1

#define HTT_VDEV_STATS_TLV_HDR_OFFSET 0
#define HTT_VDEV_STATS_TLV_VDEV_ID_OFFSET 1
#define HTT_VDEV_STATS_TLV_RX_BYTE_CNT_OFFSET 2
#define HTT_VDEV_STATS_TLV_RX_PKT_CNT_OFFSET 4
#define HTT_VDEV_STATS_TLV_TX_SUCCESS_BYTE_CNT_OFFSET 6
#define HTT_VDEV_STATS_TLV_TX_SUCCESS_PKT_CNT_OFFSET 8
#define HTT_VDEV_STATS_TLV_TX_RETRY_PKT_CNT_OFFSET 10
#define HTT_VDEV_STATS_TLV_TX_DROP_PKT_CNT_OFFSET 12
#define HTT_VDEV_STATS_TLV_TX_AGE_OUT_PKT_CNT_OFFSET 14
#define HTT_VDEV_STATS_TLV_TX_RETRY_BYTE_CNT_OFFSET 16
#define HTT_VDEV_STATS_TLV_TX_DROP_BYTE_CNT_OFFSET 18
#define HTT_VDEV_STATS_TLV_TX_AGE_OUT_BYTE_CNT_OFFSET 20

#define HTT_VDEV_STATS_GET_INDEX(index) \
	HTT_VDEV_STATS_TLV_##index##_OFFSET

#define HTT_VDEV_STATS_U32_SHIFT 0x20
#define HTT_VDEV_STATS_U32_MASK 0xFFFFFFFF00000000
#define HTT_VDEV_STATS_L32_MASK 0x00000000FFFFFFFF

#define HTT_VDEV_GET_STATS_U64(msg_word) \
	(((((uint64_t)(*(((uint32_t *)msg_word) + 1))) & HTT_VDEV_STATS_L32_MASK) << \
	HTT_VDEV_STATS_U32_SHIFT) | ((*(uint32_t *)msg_word) & HTT_VDEV_STATS_L32_MASK))

#define HTT_VDEV_GET_STATS_U32(msg_word) \
	((*(uint32_t *)msg_word) & HTT_VDEV_STATS_L32_MASK)
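
/*
 * Illustrative helper, not part of the upstream API: shows how
 * HTT_VDEV_GET_STATS_U64() reassembles a 64-bit counter from two
 * consecutive 32-bit words of a VDEV stats TLV, here the rx byte count
 * located via HTT_VDEV_STATS_GET_INDEX(). "msg_word" is assumed to point
 * at the first word of the TLV.
 */
static inline uint64_t dp_htt_example_vdev_rx_bytes(uint32_t *msg_word)
{
	return HTT_VDEV_GET_STATS_U64(msg_word +
				      HTT_VDEV_STATS_GET_INDEX(RX_BYTE_CNT));
}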

#define MAX_SCHED_STARVE 100000
#define WRAP_DROP_TSF_DELTA 10000
#define MAX_TSF_32 0xFFFFFFFF

#define dp_htt_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_HTT, params)
#define dp_htt_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_HTT, params)
#define dp_htt_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_HTT, params)
#define dp_htt_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_HTT, ## params)
#define dp_htt_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_HTT, params)

#define dp_htt_tx_stats_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_HTT_TX_STATS, params)
#define dp_htt_tx_stats_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_HTT_TX_STATS, params)
#define dp_htt_tx_stats_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_HTT_TX_STATS, params)
#define dp_htt_tx_stats_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_HTT_TX_STATS, ## params)
#define dp_htt_tx_stats_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_HTT_TX_STATS, params)

#define RXMON_GLOBAL_EN_SHIFT 28

/**
 * enum dp_full_mon_config - enum to enable/disable full monitor mode
 *
 * @DP_FULL_MON_DISABLE: Disable full monitor mode
 * @DP_FULL_MON_ENABLE: Enable full monitor mode
 */
enum dp_full_mon_config {
	DP_FULL_MON_DISABLE,
	DP_FULL_MON_ENABLE,
};

struct dp_htt_htc_pkt {
	void *soc_ctxt;
	qdf_dma_addr_t nbuf_paddr;
	HTC_PACKET htc_pkt;
};

struct dp_htt_htc_pkt_union {
	union {
		struct dp_htt_htc_pkt pkt;
		struct dp_htt_htc_pkt_union *next;
	} u;
};

struct dp_htt_timestamp {
	long *umac_ttt;
	long *lmac_ttt;
};

struct htt_soc {
	struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
	struct dp_soc *dp_soc;
	hal_soc_handle_t hal_soc;
	struct dp_htt_timestamp pdevid_tt[MAX_PDEV_CNT];
	/* htt_logger handle */
	struct htt_logger *htt_logger_handle;
	HTC_HANDLE htc_soc;
	qdf_device_t osdev;
	HTC_ENDPOINT_ID htc_endpoint;
	struct dp_htt_htc_pkt_union *htt_htc_pkt_freelist;
	struct dp_htt_htc_pkt_union *htt_htc_pkt_misclist;
	struct {
		u_int8_t major;
		u_int8_t minor;
	} tgt_ver;
	struct {
		u_int8_t major;
		u_int8_t minor;
	} wifi_ip_ver;
	struct {
		int htc_err_cnt;
		int htc_pkt_free;
		int skip_count;
		int fail_count;
		/* rtpm put skip count for ver req msg */
		int htt_ver_req_put_skip;
	} stats;
	HTT_TX_MUTEX_TYPE htt_tx_mutex;
};

#ifdef QCA_MONITOR_2_0_SUPPORT
/**
 * struct dp_tx_mon_downstream_tlv_config - Enable/Disable TxMon
 * downstream TLVs
 * tx_fes_setup: TX_FES_SETUP TLV
 * tx_peer_entry: TX_PEER_ENTRY TLV
 * tx_queue_extension: TX_QUEUE_EXTENSION TLV
 * tx_last_mpdu_end: TX_LAST_MPDU_END TLV
 * tx_last_mpdu_fetched: TX_LAST_MPDU_FETCHED TLV
 * tx_data_sync: TX_DATA_SYNC TLV
 * pcu_ppdu_setup_init: PCU_PPDU_SETUP_INIT TLV
 * fw2s_mon: FW2S_MON TLV
 * tx_loopback_setup: TX_LOOPBACK_SETUP TLV
 * sch_critical_tlv_ref: SCH_CRITICAL_TLV_REF TLV
 * ndp_preamble_done: NDP_PREAMBLE_DONE TLV
 * tx_raw_frame_setup: TX_RAW_OR_NATIVE_FRAME_SETUP TLV
 * txpcu_user_setup: TXPCU_USER_SETUP TLV
 * rxpcu_setup: RXPCU_SETUP TLV
 * rxpcu_setup_complete: RXPCU_SETUP_COMPLETE TLV
 * coex_tx_req: COEX_TX_REQ TLV
 * rxpcu_user_setup: RXPCU_USER_SETUP TLV
 * rxpcu_user_setup_ext: RXPCU_USER_SETUP_EXT TLV
 * wur_data: WUR_DATA TLV
 * tqm_mpdu_global_start: TQM_MPDU_GLOBAL_START
 * tx_fes_setup_complete: TX_FES_SETUP_COMPLETE TLV
 * scheduler_end: SCHEDULER_END TLV
 * sch_wait_instr_tx_path: SCH_WAIT_INSTR_TX_PATH TLV
 *
 */
struct dp_tx_mon_downstream_tlv_config {
	uint32_t tx_fes_setup:1,
		 tx_peer_entry:1,
		 tx_queue_extension:1,
		 tx_last_mpdu_end:1,
		 tx_last_mpdu_fetched:1,
		 tx_data_sync:1,
		 pcu_ppdu_setup_init:1,
		 fw2s_mon:1,
		 tx_loopback_setup:1,
		 sch_critical_tlv_ref:1,
		 ndp_preamble_done:1,
		 tx_raw_frame_setup:1,
		 txpcu_user_setup:1,
		 rxpcu_setup:1,
		 rxpcu_setup_complete:1,
		 coex_tx_req:1,
		 rxpcu_user_setup:1,
		 rxpcu_user_setup_ext:1,
		 wur_data:1,
		 tqm_mpdu_global_start:1,
		 tx_fes_setup_complete:1,
		 scheduler_end:1,
		 sch_wait_instr_tx_path:1;
};

/**
 * struct dp_tx_mon_upstream_tlv_config - Enable/Disable TxMon
 * upstream TLVs
 * rx_response_required_info: RX_RESPONSE_REQUIRED_INFO TLV
 * response_start_status: RESPONSE_START_STATUS TLV
 * response_end_status: RESPONSE_END_STATUS TLV
 * tx_fes_status_start: TX_FES_STATUS_START TLV
 * tx_fes_status_end: TX_FES_STATUS_END TLV
 * tx_fes_status_start_ppdu: TX_FES_STATUS_START_PPDU TLV
 * tx_fes_status_user_ppdu: TX_FES_STATUS_USER_PPDU TLV
 * tx_fes_status_ack_or_ba: TX_FES_STATUS_ACK_OR_BA TLV
 * tx_fes_status_1k_ba: TX_FES_STATUS_1K_BA TLV
 * tx_fes_status_start_prot: TX_FES_STATUS_START_PROTO TLV
 * tx_fes_status_prot: TX_FES_STATUS_PROT TLV
 * tx_fes_status_user_response: TX_FES_STATUS_USER_RESPONSE TLV
 * rx_frame_bitmap_ack: RX_FRAME_BITMAP_ACK TLV
 * rx_frame_1k_bitmap_ack: RX_FRAME_1K_BITMAP_ACK TLV
 * coex_tx_status: COEX_TX_STATUS TLV
 * recevied_response_info: RECEIVED_RESPONSE_INFO TLV
 * recevied_response_info_p2: RECEIVED_RESPONSE_INFO_PART2 TLV
 * ofdma_trigger_details: OFDMA_TRIGGER_DETAILS
 * recevied_trigger_info: RECEIVED_TRIGGER_INFO
 * pdg_tx_request: PDG_TX_REQUEST
 * pdg_response: PDG_RESPONSE
 * pdg_trig_response: PDG_TRIG_RESPONSE
 * trigger_response_tx_done: TRIGGER_RESPONSE_TX_DONE
 * prot_tx_end: PROT_TX_END
 * ppdu_tx_end: PPDU_TX_END
 * r2r_status_end: R2R_STATUS_END
 * flush_req: FLUSH_REQ
 * mactx_phy_desc: MACTX_PHY_DESC
 * mactx_user_desc_cmn: MACTX_USER_DESC_COMMON
 * mactx_user_desc_per_usr: MACTX_USER_DESC_PER_USER
 * tqm_acked_1k_mpdu: TQM_ACKED_1K_MPDU
 * tqm_acked_mpdu: TQM_ACKED_MPDU
 * tqm_update_tx_mpdu_count: TQM_UPDATE_TX_MPDU_COUNT
 * phytx_ppdu_header_info_request: PHYTX_PPDU_HEADER_INFO_REQUEST
 * u_sig_eht_su_mu: U_SIG_EHT_SU_MU
 * u_sig_eht_su: U_SIG_EHT_SU
 * u_sig_eht_tb: U_SIG_EHT_TB
 * eht_sig_usr_su: EHT_SIG_USR_SU
 * eht_sig_usr_mu_mimo: EHT_SIG_USR_MU_MIMO
 * eht_sig_usr_ofdma: EHT_SIG_USR_OFDMA
 * he_sig_a_su: HE_SIG_A_SU
 * he_sig_a_mu_dl: HE_SIG_A_MU_DL
 * he_sig_a_mu_ul: HE_SIG_A_MU_UL
 * he_sig_b1_mu: HE_SIG_B1_MU
 * he_sig_b2_mu: HE_SIG_B2_MU
 * he_sig_b2_ofdma: HE_SIG_B2_OFDMA
 * vht_sig_b_mu160: VHT_SIG_B_MU160
 * vht_sig_b_mu80: VHT_SIG_B_MU80
 * vht_sig_b_mu40: VHT_SIG_B_MU40
 * vht_sig_b_mu20: VHT_SIG_B_MU20
 * vht_sig_b_su160: VHT_SIG_B_SU160
 * vht_sig_b_su80: VHT_SIG_B_SU80
 * vht_sig_b_su40: VHT_SIG_B_SU40
 * vht_sig_b_su20: VHT_SIG_B_SU20
 * vht_sig_a: VHT_SIG_A
 * ht_sig: HT_SIG
 * l_sig_b: L_SIG_B
 * l_sig_a: L_SIG_A
 * tx_service: TX_SERVICE
 * txpcu_buf_status: TXPCU_BUFFER_STATUS
 * txpcu_user_buf_status: TXPCU_USER_BUFFER_STATUS
 * txdma_stop_request: TXDMA_STOP_REQUEST
 * expected_response: EXPECTED_RESPONSE
 * tx_mpdu_count_transfer_end: TX_MPDU_COUNT_TRANSFER_END
 * rx_trig_info: RX_TRIG_INFO
 * rxpcu_tx_setup_clear: RXPCU_TX_SETUP_CLEAR
 * rx_frame_bitmap_req: RX_FRAME_BITMAP_REQ
 * rx_phy_sleep: RX_PHY_SLEEP
 * txpcu_preamble_done: TXPCU_PREAMBLE_DONE
 * txpcu_phytx_debug32: TXPCU_PHYTX_DEBUG32
 * txpcu_phytx_other_transmit_info32: TXPCU_PHYTX_OTHER_TRANSMIT_INFO32
 * rx_ppdu_noack_report: RX_PPDU_NO_ACK_REPORT
 * rx_ppdu_ack_report: RX_PPDU_ACK_REPORT
 * coex_rx_status: COEX_RX_STATUS
 * rx_start_param: RX_START_PARAM
 * tx_cbf_info: TX_CBF_INFO
 * rxpcu_early_rx_indication: RXPCU_EARLY_RX_INDICATION
 * received_response_user_7_0: RECEIVED_RESPONSE_USER_7_0
 * received_response_user_15_8: RECEIVED_RESPONSE_USER_15_8
 * received_response_user_23_16: RECEIVED_RESPONSE_USER_23_16
 * received_response_user_31_24: RECEIVED_RESPONSE_USER_31_24
 * received_response_user_36_32: RECEIVED_RESPONSE_USER_36_32
 * rx_pm_info: RX_PM_INFO
 * rx_preamble: RX_PREAMBLE
 * others: OTHERS
 * mactx_pre_phy_desc: MACTX_PRE_PHY_DESC
 *
 */
struct dp_tx_mon_upstream_tlv_config {
	uint32_t rx_response_required_info:1,
		 response_start_status:1,
		 response_end_status:1,
		 tx_fes_status_start:1,
		 tx_fes_status_end:1,
		 tx_fes_status_start_ppdu:1,
		 tx_fes_status_user_ppdu:1,
		 tx_fes_status_ack_or_ba:1,
		 tx_fes_status_1k_ba:1,
		 tx_fes_status_start_prot:1,
		 tx_fes_status_prot:1,
		 tx_fes_status_user_response:1,
		 rx_frame_bitmap_ack:1,
		 rx_frame_1k_bitmap_ack:1,
		 coex_tx_status:1,
		 recevied_response_info:1,
		 recevied_response_info_p2:1,
		 ofdma_trigger_details:1,
		 recevied_trigger_info:1,
		 pdg_tx_request:1,
		 pdg_response:1,
		 pdg_trig_response:1,
		 trigger_response_tx_done:1,
		 prot_tx_end:1,
		 ppdu_tx_end:1,
		 r2r_status_end:1,
		 flush_req:1,
		 mactx_phy_desc:1,
		 mactx_user_desc_cmn:1,
		 mactx_user_desc_per_usr:1;
	uint32_t tqm_acked_1k_mpdu:1,
		 tqm_acked_mpdu:1,
		 tqm_update_tx_mpdu_count:1,
		 phytx_ppdu_header_info_request:1,
		 u_sig_eht_su_mu:1,
		 u_sig_eht_su:1,
		 u_sig_eht_tb:1,
		 eht_sig_usr_su:1,
		 eht_sig_usr_mu_mimo:1,
		 eht_sig_usr_ofdma:1,
		 he_sig_a_su:1,
		 he_sig_a_mu_dl:1,
		 he_sig_a_mu_ul:1,
		 he_sig_b1_mu:1,
		 he_sig_b2_mu:1,
		 he_sig_b2_ofdma:1,
		 vht_sig_b_mu160:1,
		 vht_sig_b_mu80:1,
		 vht_sig_b_mu40:1,
		 vht_sig_b_mu20:1,
		 vht_sig_b_su160:1,
		 vht_sig_b_su80:1,
		 vht_sig_b_su40:1,
		 vht_sig_b_su20:1,
		 vht_sig_a:1,
		 ht_sig:1,
		 l_sig_b:1,
		 l_sig_a:1,
		 tx_service:1;
	uint32_t txpcu_buf_status:1,
		 txpcu_user_buf_status:1,
		 txdma_stop_request:1,
		 expected_response:1,
		 tx_mpdu_count_transfer_end:1,
		 rx_trig_info:1,
		 rxpcu_tx_setup_clear:1,
		 rx_frame_bitmap_req:1,
		 rx_phy_sleep:1,
		 txpcu_preamble_done:1,
		 txpcu_phytx_debug32:1,
		 txpcu_phytx_other_transmit_info32:1,
		 rx_ppdu_noack_report:1,
		 rx_ppdu_ack_report:1,
		 coex_rx_status:1,
		 rx_start_param:1,
		 tx_cbf_info:1,
		 rxpcu_early_rx_indication:1,
		 received_response_user_7_0:1,
		 received_response_user_15_8:1,
		 received_response_user_23_16:1,
		 received_response_user_31_24:1,
		 received_response_user_36_32:1,
		 rx_pm_info:1,
		 rx_preamble:1,
		 others:1,
		 mactx_pre_phy_desc:1;
};

/**
 * struct dp_tx_mon_wordmask_config - Tx monitor word mask
 * tx_fes_setup: TX_FES_SETUP TLV word mask
 * tx_peer_entry: TX_PEER_ENTRY TLV word mask
 * tx_queue_ext: TX_QUEUE_EXTENSION TLV word mask
 * tx_msdu_start: TX_MSDU_START TLV word mask
 * tx_mpdu_start: TX_MPDU_START TLV word mask
 * pcu_ppdu_setup_init: PCU_PPDU_SETUP TLV word mask
 * rxpcu_user_setup: RXPCU_USER_SETUP TLV word mask
 */
struct dp_tx_mon_wordmask_config {
	uint16_t tx_fes_setup;
	uint16_t tx_peer_entry;
	uint16_t tx_queue_ext;
	uint16_t tx_msdu_start;
	uint16_t tx_mpdu_start;
	uint32_t pcu_ppdu_setup_init;
	uint16_t rxpcu_user_setup;
};

/**
 * struct htt_tx_ring_tlv_filter - Tx ring TLV filter
 * enable/disable.
 * @dtlvs: enable/disable downstream TLVs
 * @utlvs: enable/disable upstream TLVs
 * @wmask: enable/disable word mask subscription
 * @mgmt_filter: enable/disable mgmt packets
 * @data_filter: enable/disable data packets
 * @ctrl_filter: enable/disable ctrl packets
 * @mgmt_dma_length: configure length for mgmt packet
 * @ctrl_dma_length: configure length for ctrl packet
 * @data_dma_length: configure length for data packet
 * @mgmt_mpdu_end: enable mpdu end tlv for mgmt
 * @mgmt_msdu_end: enable msdu end tlv for mgmt
 * @mgmt_msdu_start: enable msdu start tlv for mgmt
 * @mgmt_mpdu_start: enable mpdu start tlv for mgmt
 * @ctrl_mpdu_end: enable mpdu end tlv for ctrl
 * @ctrl_msdu_end: enable msdu end tlv for ctrl
 * @ctrl_msdu_start: enable msdu start tlv for ctrl
 * @ctrl_mpdu_start: enable mpdu start tlv for ctrl
 * @data_mpdu_end: enable mpdu end tlv for data
 * @data_msdu_end: enable msdu end tlv for data
 * @data_msdu_start: enable msdu start tlv for data
 * @data_mpdu_start: enable mpdu start tlv for data
 * @mgmt_mpdu_log: enable mgmt mpdu level logging
 * @ctrl_mpdu_log: enable ctrl mpdu level logging
 * @data_mpdu_log: enable data mpdu level logging
 * @enable: enable tx monitor
 *
 * NOTE: Do not change the layout of this structure
 */
struct htt_tx_ring_tlv_filter {
	struct dp_tx_mon_downstream_tlv_config dtlvs;
	struct dp_tx_mon_upstream_tlv_config utlvs;
	struct dp_tx_mon_wordmask_config wmask;
	uint16_t mgmt_filter;
	uint16_t data_filter;
	uint16_t ctrl_filter;
	uint16_t mgmt_dma_length:3,
		 ctrl_dma_length:3,
		 data_dma_length:3;
	uint16_t mgmt_mpdu_end:1,
		 mgmt_msdu_end:1,
		 mgmt_msdu_start:1,
		 mgmt_mpdu_start:1,
		 ctrl_mpdu_end:1,
		 ctrl_msdu_end:1,
		 ctrl_msdu_start:1,
		 ctrl_mpdu_start:1,
		 data_mpdu_end:1,
		 data_msdu_end:1,
		 data_msdu_start:1,
		 data_mpdu_start:1;
	uint8_t mgmt_mpdu_log:1,
		ctrl_mpdu_log:1,
		data_mpdu_log:1;
	uint8_t enable:1;
};
#endif /* QCA_MONITOR_2_0_SUPPORT */

/**
 * struct htt_rx_ring_tlv_filter - Rx ring TLV filter
 * enable/disable.
 * @mpdu_start: enable/disable MPDU start TLV
 * @msdu_start: enable/disable MSDU start TLV
 * @packet: enable/disable PACKET TLV
 * @msdu_end: enable/disable MSDU end TLV
 * @mpdu_end: enable/disable MPDU end TLV
 * @packet_header: enable/disable PACKET header TLV
 * @attention: enable/disable ATTENTION TLV
 * @ppdu_start: enable/disable PPDU start TLV
 * @ppdu_end: enable/disable PPDU end TLV
 * @ppdu_end_user_stats: enable/disable PPDU user stats TLV
 * @ppdu_end_user_stats_ext: enable/disable PPDU user stats ext TLV
 * @ppdu_end_status_done: enable/disable PPDU end status done TLV
 * @enable_fp: enable/disable FP packet
 * @enable_md: enable/disable MD packet
 * @enable_mo: enable/disable MO packet
 * @enable_mgmt: enable/disable MGMT packet
 * @enable_ctrl: enable/disable CTRL packet
 * @enable_data: enable/disable DATA packet
 * @offset_valid: Flag to indicate if below offsets are valid
 * @rx_packet_offset: Offset of packet payload
 * @rx_header_offset: Offset of rx_header tlv
 * @rx_mpdu_end_offset: Offset of rx_mpdu_end tlv
 * @rx_mpdu_start_offset: Offset of rx_mpdu_start tlv
 * @rx_msdu_end_offset: Offset of rx_msdu_end tlv
 * @rx_msdu_start_offset: Offset of rx_msdu_start tlv
 * @rx_attn_offset: Offset of rx_attention tlv
 * @fp_phy_err: Flag to indicate FP PHY status tlv
 * @fp_phy_err_buf_src: source ring selection for the FP PHY ERR status tlv
 * @fp_phy_err_buf_dest: dest ring selection for the FP PHY ERR status tlv
 * @phy_err_mask: select the phy errors defined in phyrx_abort_request_reason
 * enums 0 to 31.
 * @phy_err_mask_cont: select the fp phy errors defined in
 * phyrx_abort_request_reason enums 32 to 63
 * @rx_mpdu_start_wmask: word mask for mpdu start tlv
 * @rx_mpdu_end_wmask: word mask for mpdu end tlv
 * @rx_msdu_end_wmask: word mask for msdu end tlv
 * @rx_pkt_tlv_offset: rx pkt tlv offset
 * @mgmt_dma_length: configure length for mgmt packet
 * @ctrl_dma_length: configure length for ctrl packet
 * @data_dma_length: configure length for data packet
 * @mgmt_mpdu_log: enable mgmt mpdu level logging
 * @ctrl_mpdu_log: enable ctrl mpdu level logging
 * @data_mpdu_log: enable data mpdu level logging
 * @enable: enable rx monitor
 * @rx_mon_global_en: rx monitor global enable
 *
 * NOTE: Do not change the layout of this structure
 */
struct htt_rx_ring_tlv_filter {
	u_int32_t mpdu_start:1,
		  msdu_start:1,
		  packet:1,
		  msdu_end:1,
		  mpdu_end:1,
		  packet_header:1,
		  attention:1,
		  ppdu_start:1,
		  ppdu_end:1,
		  ppdu_end_user_stats:1,
		  ppdu_end_user_stats_ext:1,
		  ppdu_end_status_done:1,
		  header_per_msdu:1,
		  enable_fp:1,
		  enable_md:1,
		  enable_mo:1;
	u_int32_t fp_mgmt_filter:16,
		  mo_mgmt_filter:16;
	u_int32_t fp_ctrl_filter:16,
		  mo_ctrl_filter:16;
	u_int32_t fp_data_filter:16,
		  mo_data_filter:16;
	u_int16_t md_data_filter;
	u_int16_t md_mgmt_filter;
	u_int16_t md_ctrl_filter;
	bool offset_valid;
	uint16_t rx_packet_offset;
	uint16_t rx_header_offset;
	uint16_t rx_mpdu_end_offset;
	uint16_t rx_mpdu_start_offset;
	uint16_t rx_msdu_end_offset;
	uint16_t rx_msdu_start_offset;
	uint16_t rx_attn_offset;
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	u_int32_t fp_phy_err:1,
		  fp_phy_err_buf_src:2,
		  fp_phy_err_buf_dest:2,
		  phy_err_filter_valid:1;
	u_int32_t phy_err_mask;
	u_int32_t phy_err_mask_cont;
#endif
#ifdef QCA_MONITOR_2_0_SUPPORT
	uint16_t rx_mpdu_start_wmask;
	uint16_t rx_mpdu_end_wmask;
	uint16_t rx_msdu_end_wmask;
	uint16_t rx_pkt_tlv_offset;
	uint16_t mgmt_dma_length:3,
		 ctrl_dma_length:3,
		 data_dma_length:3,
		 mgmt_mpdu_log:1,
		 ctrl_mpdu_log:1,
		 data_mpdu_log:1,
		 enable:1;
#endif
	uint8_t rx_mon_global_en;
};

/**
 * struct dp_htt_rx_flow_fst_setup - Rx FST setup message
 * @pdev_id: DP Pdev identifier
 * @max_entries: Size of Rx FST in number of entries
 * @max_search: Number of collisions allowed
 * @base_addr_lo: lower 32-bit physical address
 * @base_addr_hi: upper 32-bit physical address
 * @ip_da_sa_prefix: IPv4 prefix to map to IPv6 address scheme
 * @hash_key_len: Rx FST hash key size
 * @hash_key: Rx FST Toeplitz hash key
 */
struct dp_htt_rx_flow_fst_setup {
	uint8_t pdev_id;
	uint32_t max_entries;
	uint32_t max_search;
	uint32_t base_addr_lo;
	uint32_t base_addr_hi;
	uint32_t ip_da_sa_prefix;
	uint32_t hash_key_len;
	uint8_t *hash_key;
};

/**
 * enum dp_htt_flow_fst_operation - FST related operations allowed
 * @DP_HTT_FST_CACHE_OP_NONE: Cache no-op
 * @DP_HTT_FST_CACHE_INVALIDATE_ENTRY: Invalidate single cache entry
 * @DP_HTT_FST_CACHE_INVALIDATE_FULL: Invalidate entire cache
 * @DP_HTT_FST_ENABLE: Bypass FST is enabled
 * @DP_HTT_FST_DISABLE: Disable bypass FST
 */
enum dp_htt_flow_fst_operation {
	DP_HTT_FST_CACHE_OP_NONE,
	DP_HTT_FST_CACHE_INVALIDATE_ENTRY,
	DP_HTT_FST_CACHE_INVALIDATE_FULL,
	DP_HTT_FST_ENABLE,
	DP_HTT_FST_DISABLE
};

/**
 * struct dp_htt_rx_flow_fst_operation - Rx FST operation message
 * @pdev_id: DP Pdev identifier
 * @op_code: FST operation to be performed by FW/HW
 * @rx_flow: Rx Flow information on which operation is to be performed
 */
struct dp_htt_rx_flow_fst_operation {
	uint8_t pdev_id;
	enum dp_htt_flow_fst_operation op_code;
	struct cdp_rx_flow_info *rx_flow;
};

/**
 * struct dp_htt_rx_fisa_cfg - Rx FISA config
 * @pdev_id: DP Pdev identifier
 * @fisa_timeout: FISA aggregation timeout
 */
struct dp_htt_rx_fisa_cfg {
	uint8_t pdev_id;
	uint32_t fisa_timeout;
};

/*
 * htt_htc_pkt_alloc() - Allocate HTC packet buffer
 * @htt_soc: HTT SOC handle
 *
 * Return: Pointer to htc packet buffer
 */
struct dp_htt_htc_pkt *htt_htc_pkt_alloc(struct htt_soc *soc);

/*
 * htt_htc_pkt_free() - Free HTC packet buffer
 * @htt_soc: HTT SOC handle
 * @pkt: HTC packet buffer to be freed
 *
 * Return: None
 */
void
htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt);

#define HTT_HTC_PKT_STATUS_SUCCESS \
	((pkt->htc_pkt.Status != QDF_STATUS_E_CANCELED) && \
	(pkt->htc_pkt.Status != QDF_STATUS_E_RESOURCES))

#ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
static inline void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
{
}
#else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
/*
 * htt_htc_misc_pkt_list_add() - Add pkt to misc list
 * @htt_soc: HTT SOC handle
 * @pkt: pkt to be added to the misc list
 */
void
htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt);
#endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */

/**
 * DP_HTT_SEND_HTC_PKT() - Send htt packet from host
 * @soc: HTT SOC handle
 * @pkt: pkt to be sent
 * @cmd: command to be recorded in dp htt logger
 * @buf: Pointer to buffer to be recorded for the above cmd
 *
 * Return: QDF_STATUS of the HTC send operation
 */
static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
					     struct dp_htt_htc_pkt *pkt,
					     uint8_t cmd, uint8_t *buf)
{
	QDF_STATUS status;

	htt_command_record(soc->htt_logger_handle, cmd, buf);

	status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
	if (status == QDF_STATUS_SUCCESS && HTT_HTC_PKT_STATUS_SUCCESS)
		htt_htc_misc_pkt_list_add(soc, pkt);
	else
		soc->stats.fail_count++;

	return status;
}
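
/*
 * Minimal usage sketch, not upstream code: "pkt" is assumed to have been
 * obtained via htt_htc_pkt_alloc() and its htc_pkt already populated by the
 * caller; on a failed send the descriptor is returned to the free list with
 * htt_htc_pkt_free().
 */
static inline QDF_STATUS
dp_htt_example_send_or_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt,
			    uint8_t cmd, uint8_t *buf)
{
	QDF_STATUS status = DP_HTT_SEND_HTC_PKT(soc, pkt, cmd, buf);

	if (QDF_IS_STATUS_ERROR(status))
		htt_htc_pkt_free(soc, pkt);

	return status;
}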

QDF_STATUS dp_htt_rx_fisa_config(struct dp_pdev *pdev,
				 struct dp_htt_rx_fisa_cfg *fisa_config);

/*
 * htt_soc_initialize() - SOC level HTT initialization
 * @htt_soc: Opaque htt SOC handle
 * @ctrl_psoc: Opaque ctrl SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
htt_soc_initialize(struct htt_soc *htt_soc,
		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		   HTC_HANDLE htc_soc,
		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev);

/*
 * htt_soc_attach() - attach DP and HTT SOC
 * @soc: DP SOC handle
 * @htc_hdl: HTC handle
 *
 * Return: htt_soc handle on Success, NULL on Failure
 */
struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_hdl);

/*
 * htt_set_htc_handle() - set HTC handle
 * @htt_hdl: HTT handle/SOC
 * @htc_soc: HTC handle
 *
 * Return: None
 */
void htt_set_htc_handle(struct htt_soc *htt_hdl, HTC_HANDLE htc_soc);

/*
 * htt_get_htc_handle() - get HTC handle
 * @htt_hdl: HTT handle/SOC
 *
 * Return: HTC_HANDLE
 */
HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_hdl);

/*
 * htt_soc_htc_dealloc() - HTC memory de-alloc
 * @htt_handle: SOC level HTT handle
 *
 * Return: None
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle);

/*
 * htt_soc_htc_prealloc() - HTC memory prealloc
 * @htt_soc: SOC level HTT handle
 *
 * Return: QDF_STATUS_SUCCESS on success or
 *	   QDF_STATUS_E_NO_MEM on allocation failure
 */
QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *htt_soc);

void htt_soc_detach(struct htt_soc *soc);

int htt_srng_setup(struct htt_soc *htt_soc, int pdev_id,
		   hal_ring_handle_t hal_ring_hdl,
		   int hal_ring_type);

int htt_soc_attach_target(struct htt_soc *htt_soc);
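
/*
 * Illustrative bring-up order (a sketch under assumptions, not the
 * authoritative attach sequence): attach the HTT SOC to the DP SOC, run the
 * SOC level initialization, then sync with the target. All handles passed in
 * are assumed valid and error unwinding is simplified for the example.
 */
static inline struct htt_soc *
dp_htt_example_soc_bringup(struct dp_soc *soc,
			   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   HTC_HANDLE htc_soc,
			   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
{
	struct htt_soc *htt_handle = htt_soc_attach(soc, htc_soc);

	if (!htt_handle)
		return NULL;

	if (!htt_soc_initialize(htt_handle, ctrl_psoc, htc_soc,
				hal_soc_hdl, osdev) ||
	    htt_soc_attach_target(htt_handle)) {
		/* simplified cleanup for the sketch */
		htt_soc_detach(htt_handle);
		return NULL;
	}

	return htt_handle;
}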

/*
 * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
 * config message to target
 * @htt_soc: HTT SOC handle
 * @pdev_id: PDEV Id
 * @hal_ring_hdl: Opaque HAL SRNG pointer
 * @hal_ring_type: SRNG ring type
 * @ring_buf_size: SRNG buffer size
 * @htt_tlv_filter: Rx SRNG TLV and filter setting
 *
 * Return: 0 on success; error code on failure
 */
int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
			hal_ring_handle_t hal_ring_hdl,
			int hal_ring_type, int ring_buf_size,
			struct htt_rx_ring_tlv_filter *htt_tlv_filter);
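
/*
 * Usage sketch, not upstream code. Assumptions: the ring handle and buffer
 * size come from the caller, and the RXDMA_MONITOR_STATUS ring type is
 * available from the HAL headers pulled in via dp_types.h. The example
 * subscribes to a few PPDU level TLVs and pushes the filter to the target.
 */
static inline int
dp_htt_example_mon_status_cfg(struct htt_soc *htt_soc, int pdev_id,
			      hal_ring_handle_t hal_ring_hdl,
			      int ring_buf_size)
{
	struct htt_rx_ring_tlv_filter tlv_filter = {0};

	tlv_filter.ppdu_start = 1;
	tlv_filter.ppdu_end = 1;
	tlv_filter.ppdu_end_user_stats = 1;
	tlv_filter.ppdu_end_status_done = 1;
	tlv_filter.enable_fp = 1;

	return htt_h2t_rx_ring_cfg(htt_soc, pdev_id, hal_ring_hdl,
				   RXDMA_MONITOR_STATUS, ring_buf_size,
				   &tlv_filter);
}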

/*
 * htt_t2h_stats_handler() - target to host stats work handler
 * @context: context (dp soc context)
 *
 * Return: void
 */
void htt_t2h_stats_handler(void *context);

/**
 * struct htt_stats_context - htt stats information
 * @soc: Handle to the DP SOC the stats belong to
 * @msg: T2H Ext stats message queue
 * @msg_len: T2H Ext stats message length
 */
struct htt_stats_context {
	struct dp_soc *soc;
	qdf_nbuf_queue_t msg;
	uint32_t msg_len;
};

/**
 * dp_htt_rx_flow_fst_setup() - Send HTT Rx FST setup message to FW
 * @pdev: DP pdev handle
 * @setup_info: FST setup parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
			 struct dp_htt_rx_flow_fst_setup *setup_info);

/**
 * dp_htt_rx_flow_fse_operation() - Send HTT Flow Search Entry msg to
 * add/del a flow in HW
 * @pdev: DP pdev handle
 * @op_info: Flow entry parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *op_info);

/**
 * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
 *
 * @htt_soc: HTT Soc handle
 * @pdev_id: Radio id
 * @dp_full_mon_config: enable/disable configuration
 *
 * Return: Success when HTT message is sent, error on failure
 */
int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
			 uint8_t pdev_id,
			 enum dp_full_mon_config);
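
/*
 * Sketch only: enable full monitor mode on radio 0; the pdev_id and the lack
 * of error handling are illustrative assumptions.
 */
static inline int dp_htt_example_enable_full_mon(struct htt_soc *htt_soc)
{
	return htt_h2t_full_mon_cfg(htt_soc, 0, DP_FULL_MON_ENABLE);
}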

/**
 * dp_h2t_hw_vdev_stats_config_send() - Send HTT command to FW for config
 * of HW vdev stats
 * @dpsoc: Datapath soc handle
 * @pdev_id: INVALID_PDEV_ID for all pdevs or 0, 1, 2 for individual pdev
 * @enable: flag to specify enable/disable of stats
 * @reset: flag to specify if command is for reset of stats
 * @reset_bitmask: bitmask of vdev_id(s) for reset of HW stats
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_h2t_hw_vdev_stats_config_send(struct dp_soc *dpsoc,
					    uint8_t pdev_id, bool enable,
					    bool reset, uint64_t reset_bitmask);
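
/*
 * Sketch under assumptions (the chosen vdev ids are arbitrary examples):
 * build a reset bitmask covering vdev 0 and vdev 2 and request a HW vdev
 * stats reset across all pdevs via INVALID_PDEV_ID.
 */
static inline QDF_STATUS
dp_htt_example_reset_hw_vdev_stats(struct dp_soc *dpsoc)
{
	uint64_t reset_bitmask = (1ULL << 0) | (1ULL << 2);

	return dp_h2t_hw_vdev_stats_config_send(dpsoc, INVALID_PDEV_ID,
						true, true, reset_bitmask);
}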

static inline enum htt_srng_ring_id
dp_htt_get_mon_htt_ring_id(struct dp_soc *soc,
			   enum hal_ring_type hal_ring_type)
{
	enum htt_srng_ring_id htt_srng_id = 0;

	if (wlan_cfg_get_txmon_hw_support(soc->wlan_cfg_ctx)) {
		switch (hal_ring_type) {
		case RXDMA_MONITOR_BUF:
			htt_srng_id = HTT_RX_MON_HOST2MON_BUF_RING;
			break;
		case RXDMA_MONITOR_DST:
			htt_srng_id = HTT_RX_MON_MON2HOST_DEST_RING;
			break;
		default:
			dp_err("Invalid ring type %d ", hal_ring_type);
			break;
		}
	} else {
		switch (hal_ring_type) {
		case RXDMA_MONITOR_BUF:
			htt_srng_id = HTT_RXDMA_MONITOR_BUF_RING;
			break;
		case RXDMA_MONITOR_DST:
			htt_srng_id = HTT_RXDMA_MONITOR_DEST_RING;
			break;
		default:
			dp_err("Invalid ring type %d ", hal_ring_type);
			break;
		}
	}

	return htt_srng_id;
}
#endif /* _DP_HTT_H_ */