dp_tx.h
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"
#ifdef FEATURE_PERPKT_INFO
#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#include "if_meta_hdr.h"
#endif
#endif
#include "dp_internal.h"
#include "hal_tx.h"
#include <qdf_tracepoint.h>
#ifdef CONFIG_SAWF
#include "dp_sawf.h"
#endif
#include <qdf_pkt_add_timestamp.h>

#define DP_INVALID_VDEV_ID 0xFF

#define DP_TX_MAX_NUM_FRAGS 6

/*
 * DP_TX_DESC_FLAG_FRAG must always be defined as 0x1;
 * please do not change this flag's definition.
 */
#define DP_TX_DESC_FLAG_FRAG		0x1
#define DP_TX_DESC_FLAG_TO_FW		0x2
#define DP_TX_DESC_FLAG_SIMPLE		0x4
#define DP_TX_DESC_FLAG_RAW		0x8
#define DP_TX_DESC_FLAG_MESH		0x10
#define DP_TX_DESC_FLAG_QUEUED_TX	0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
#define DP_TX_DESC_FLAG_ME		0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
#define DP_TX_DESC_FLAG_ALLOCATED	0x200
#define DP_TX_DESC_FLAG_MESH_MODE	0x400
#define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
#define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
#define DP_TX_DESC_FLAG_FLUSH		0x2000
#define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000

#define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1

#define DP_TX_FREE_SINGLE_BUF(soc, buf)                 \
do {                                                    \
	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \
	qdf_nbuf_free(buf);                             \
} while (0)
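
/*
 * Usage sketch (illustrative only, not part of the API): the macro both
 * unmaps and frees, so it is meant for buffers that are already DMA-mapped.
 * "status", "nbuf" and the failing enqueue call are hypothetical.
 *
 *	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, ...);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		DP_TX_FREE_SINGLE_BUF(soc, nbuf);
 */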
#define OCB_HEADER_VERSION 1

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

#define DP_TX_QUEUE_MASK 0x3

#define MAX_CDP_SEC_TYPE 12

/* number of dwords for htt_tx_msdu_desc_ext2_t */
#define DP_TX_MSDU_INFO_META_DATA_DWORDS 7

#define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
#define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)

#define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
#define dp_tx_comp_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
#define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_tx_frag_info_s - TX fragment information
 * @vaddr: HLOS virtual address of the buffer
 * @paddr_lo: lower 32 bits of the physical address
 * @paddr_hi: upper bits of the physical address
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t  *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};

/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and multicast-unicast converted frames to
 * carry fragmentation information.
 * Raw frames are handed over to the driver as an SKB chain with MPDU
 * boundaries indicated through flags in the SKB CB (first_msdu and
 * last_msdu). This is converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
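
/*
 * Usage sketch (illustrative): walking the segment chain carried by a
 * dp_tx_sg_info_s. "sg_info" is a hypothetical, already-populated
 * descriptor; "i" and "total" are local scratch variables.
 *
 *	struct dp_tx_seg_info_s *seg;
 *
 *	for (seg = sg_info->curr_seg; seg; seg = seg->next)
 *		for (i = 0; i < seg->frag_cnt; i++)
 *			total += seg->frags[i].len;
 */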
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of Linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};

/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 *              (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @gsn: global sequence number for reinjected mcast packets
 * @vdev_id: vdev_id for reinjected mcast packets
 * @skip_hp_update: Skip HP update for TSO segments and update in last segment
 *
 * This structure holds the complete MSDU information needed to program the
 * hardware TCL and MSDU extension descriptors for different frame types
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	uint8_t exception_fw;
	uint8_t is_tx_sniffer;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
	uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
	uint16_t gsn;
	uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	uint8_t skip_hp_update;
#endif
};
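
/*
 * Usage sketch (illustrative): minimal setup of a dp_tx_msdu_info_s for a
 * regular (non-TSO, non-SG) frame. dp_tx_frm_std is assumed to come from
 * enum dp_tx_frm_type in dp_types.h, and HTT_TX_EXT_TID_INVALID is assumed
 * to mean "no TID override". "vdev" and "nbuf" are hypothetical.
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	msdu_info.frm_type = dp_tx_frm_std;
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 */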
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
 * @soc: core txrx context
 * @index: index of ring to deinit
 *
 * Deinit 1 TCL and 1 WBM2SW release ring on an as-needed basis using
 * the index of the respective TCL/WBM2SW release ring in the soc structure.
 * For example, if the index is 2 then &soc->tcl_data_ring[2]
 * and &soc->tx_comp_ring[2] will be deinitialized.
 *
 * Return: none
 */
void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool);
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool);
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint32_t num_desc);
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint32_t num_desc);
qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
			       bool delayed_free);
void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
			 uint8_t tid, uint8_t ring_id);
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_txrx_peer *txrx_peer,
				  uint8_t ring_id);
void dp_tx_comp_process_desc(struct dp_soc *soc,
			     struct dp_tx_desc_s *desc,
			     struct hal_tx_completion_status *ts,
			     struct dp_txrx_peer *txrx_peer);
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status,
			    uint8_t reinject_reason);
void dp_tx_inspect_handler(struct dp_soc *soc,
			   struct dp_vdev *vdev,
			   struct dp_tx_desc_s *tx_desc,
			   uint8_t *status);
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
				   uint32_t length, uint8_t tx_status,
				   bool update);
#ifdef DP_UMAC_HW_RESET_SUPPORT
qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  qdf_nbuf_t nbuf,
			  struct cdp_tx_exception_metadata *tx_exc_metadata);
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: opaque DP handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: opaque DP handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
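
/*
 * Usage sketch (illustrative): the TSO common descriptor pools follow an
 * alloc -> init -> deinit -> free lifecycle, e.g. across soc bring-up and
 * teardown. "num_pool" and "num_desc" are hypothetical configuration values.
 *
 *	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_desc))
 *		goto fail;
 *	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_desc))
 *		goto fail_free;
 *	...
 *	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
 *	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
 */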
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
				    qdf_nbuf_t nbuf);
qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
				qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);
qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
					      uint8_t vdev_id,
					      qdf_nbuf_t nbuf,
					      struct cdp_tx_exception_metadata *tx_exc);
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf);
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);
#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);

/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_srng, uint8_t ring_id,
			    uint32_t quota);
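
/*
 * Usage sketch (illustrative): the cdp-level transmit entry point
 * dp_tx_send() above returns NULL when the frame was consumed and returns
 * the nbuf back on failure, so the caller owns any returned buffer.
 * "soc_hdl", "vdev_id" and "nbuf" are hypothetical.
 *
 *	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */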
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
	return;
}
#endif

/**
 * dp_tx_pdev_init() - dp tx pdev init
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_outstanding);
	pdev->tx_descs_max = 0;
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: Handle to DP soc structure
 * @hal_soc: HAL SOC handle
 * @num_avail_for_reap: descriptors available for reap
 * @hal_ring_hdl: ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: pointer to the last prefetched SW descriptor
 *
 * Return: None
 */
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s **last_prefetched_sw_desc)
{
	if (*last_prefetched_sw_desc) {
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
	}

	if (num_avail_for_reap && *last_prefetched_hw_desc) {
		dp_tx_comp_get_prefetched_params_from_hal_desc(
						soc,
						*last_prefetched_hw_desc,
						last_prefetched_sw_desc);
		*last_prefetched_hw_desc =
			hal_srng_dst_prefetch_next_cached_desc(
					hal_soc,
					hal_ring_hdl,
					(uint8_t *)*last_prefetched_hw_desc);
	}
}
#else
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
				    hal_soc_handle_t hal_soc,
				    uint32_t num_avail_for_reap,
				    hal_ring_handle_t hal_ring_hdl,
				    void **last_prefetched_hw_desc,
				    struct dp_tx_desc_s **last_prefetched_sw_desc)
{
}
#endif
#ifndef FEATURE_WDS
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
	return;
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
#else
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info);
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint16_t group_key);
#endif

/**
 * dp_tx_hw_to_qdf() - convert hw status to qdf status
 * @status: hw status
 *
 * Return: qdf tx rx status
 */
static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
{
	switch (status) {
	case HAL_TX_TQM_RR_FRAME_ACKED:
		return QDF_TX_RX_STATUS_OK;
	case HAL_TX_TQM_RR_REM_CMD_TX:
		return QDF_TX_RX_STATUS_NO_ACK;
	case HAL_TX_TQM_RR_REM_CMD_REM:
	case HAL_TX_TQM_RR_REM_CMD_NOTX:
	case HAL_TX_TQM_RR_REM_CMD_AGED:
		return QDF_TX_RX_STATUS_FW_DISCARD;
	default:
		return QDF_TX_RX_STATUS_DEFAULT;
	}
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * The TX packet queue has two components: a software descriptor pool id and
 * a DMA ring id. Depending on the tx feature set and hardware configuration,
 * the queue id combination differs. For example: with XPS enabled, all TX
 * descriptor pools and DMA rings are assigned per CPU id; with no XPS and
 * lock-based resource protection, descriptor pool ids differ per vdev and
 * the DMA ring id is the same as the single pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	queue->ring_id = qdf_get_cpu();
	queue->desc_pool_id = queue->ring_id;
}

/**
 * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	if (ring_id == soc->num_tcl_data_rings)
		return soc->tcl_cmd_credit_ring.hal_srng;

	return soc->tcl_data_ring[ring_id].hal_srng;
}
#else /* QCA_OL_TX_MULTIQ_SUPPORT */
#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
		queue->ring_id = DP_TX_GET_RING_ID(vdev);
	else
		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
				  vdev->pdev->soc->num_tcl_data_rings);
}
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
			  vdev->pdev->soc->num_tcl_data_rings);
}
#endif
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
	/* get flow id */
	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
	queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
#endif

static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
						       uint8_t ring_id)
{
	return soc->tcl_data_ring[ring_id].hal_srng;
}
#endif
#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success, non-zero on failure
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t hal_ring_hdl)
{
}
#else
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
					      hal_ring_handle_t hal_ring_hdl)
{
	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
					     hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
						  hal_ring_handle_t hal_ring_hdl)
{
	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif
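
/*
 * Usage sketch (illustrative): the access_start/access_end pair brackets a
 * burst of TCL ring writes; whether the access is actually lock-free depends
 * on QCA_OL_TX_LOCK_LESS_ACCESS. "soc" and "ring_id" are hypothetical, and a
 * non-zero return from access_start is treated as failure.
 *
 *	hal_ring_handle_t hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
 *
 *	if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl))
 *		return;
 *	...post one or more TCL descriptors...
 *	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
 */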
#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
	return;
}
/* TODO TX_FEATURE_NOT_YET */

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free);
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
			     uint32_t buf_type);
#else /* QCA_HOST_MODE_WIFI_DISABLED */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
	defined(QCA_TX_CAPTURE_SUPPORT) || \
	defined(QCA_MCOPY_SUPPORT)
#ifdef FEATURE_PERPKT_INFO
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_txrx_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf);
#endif
#else
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
						  struct dp_pdev *pdev,
						  struct dp_txrx_peer *peer,
						  struct hal_tx_completion_status *ts,
						  qdf_nbuf_t netbuf,
						  uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc,
				       struct hal_tx_completion_status *ts);
#else
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
				  struct dp_tx_desc_s *desc,
				  struct hal_tx_completion_status *ts)
{
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Return: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
			struct dp_tx_desc_s *tx_desc,
			uint8_t ring_id);

/**
 * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
 * @soc: Datapath soc handle
 * @vdev: Datapath vdev handle
 * @tx_desc: tx packet descriptor
 * @tid: TID for pkt transmission
 * @msdu_info: MSDU info of tx packet
 * @ring_id: TCL ring id
 *
 * Return: 1 if coalescing is to be done, 0 if not
 */
int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id);

/**
 * dp_tx_ring_access_end() - HAL ring access end for data transmission
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Return: none
 */
void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce);
#else
static inline void dp_tx_update_stats(struct dp_soc *soc,
				      struct dp_tx_desc_s *tx_desc,
				      uint8_t ring_id)
{
}

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
		      int coalesce)
{
	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_desc_s *tx_desc,
			 uint8_t tid,
			 struct dp_tx_msdu_info_s *msdu_info,
			 uint8_t ring_id)
{
	return 0;
}
#endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
 * @soc_hdl: DP soc handle
 * @is_high_tput: flag to indicate whether throughput is high
 *
 * Return: none
 */
static inline
void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
					 bool is_high_tput)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
}

void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce);
#else
#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce);
#else
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
			      hal_ring_handle_t hal_ring_hdl,
			      int coalesce)
{
	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
#endif

static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
				    bool is_high_tput)
{
}
#endif
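
/*
 * Usage sketch (illustrative): how the coalescing helpers are intended to
 * compose around a TCL ring write in an enqueue path. All variables here
 * are hypothetical.
 *
 *	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *					    msdu_info, ring_id);
 *	...program and post the TCL descriptor...
 *	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
 */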
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
	struct dp_tx_hw_desc_evt *evt;
	uint64_t idx = 0;

	if (!soc->tx_hw_desc_history)
		return;

	idx = ++soc->tx_hw_desc_history->index;
	if (idx == DP_TX_HW_DESC_HIST_MAX)
		soc->tx_hw_desc_history->index = 0;
	idx = qdf_do_div_rem(idx, DP_TX_HW_DESC_HIST_MAX);

	evt = &soc->tx_hw_desc_history->entry[idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
#else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
			 hal_ring_handle_t hal_ring_hdl,
			 struct dp_soc *soc)
{
}
#endif
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
/**
 * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
 * @ts: Tx completion status
 * @delta_tsf: Difference between TSF clock and qtimer
 * @delay_us: Delay in microseconds
 *
 * Return: QDF_STATUS_SUCCESS  : Success
 *         QDF_STATUS_E_INVAL  : Tx completion status is invalid or
 *                               delay_us is NULL
 *         QDF_STATUS_E_FAILURE: Error in delay calculation
 */
QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
			  uint32_t delta_tsf,
			  uint32_t *delay_us);

/**
 * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @delta_tsf: difference between TSF clock and qtimer
 *
 * Return: None
 */
void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint32_t delta_tsf);
#endif
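
/*
 * Usage sketch (illustrative): converting a Tx completion timestamp into a
 * delay value. "ts" and "delta_tsf" are hypothetical inputs, the latter
 * assumed to be the value previously configured via dp_set_delta_tsf().
 *
 *	uint32_t delay_us;
 *
 *	if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts, delta_tsf,
 *							  &delay_us)))
 *		return;
 *	...accumulate delay_us into the uplink delay statistics...
 */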
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
/**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @enable: true to enable and false to disable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, bool enable);

/**
 * dp_get_uplink_delay() - Get uplink delay value
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @val: pointer to save uplink delay value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
/**
 * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
 *
 * Return: True if any tx pkt tracepoint is enabled, else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_pkt_enabled());
}

#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->timestamp_tick = qdf_system_ticks();
}

/**
 * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Check for corruption in the tx descriptor; if the magic pattern does not
 * match, trigger self recovery
 *
 * Return: none
 */
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
#else
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
#endif
#ifndef CONFIG_SAWF
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Return: true when the descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#else
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#endif
#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
/**
 * dp_pkt_add_timestamp() - add timestamp in data payload
 * @vdev: dp vdev
 * @index: index to decide offset in payload
 * @time: timestamp to add in data payload
 * @nbuf: network buffer
 *
 * Return: none
 */
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf);

/**
 * dp_pkt_get_timestamp() - get current system time
 * @time: return current system time
 *
 * Return: none
 */
void dp_pkt_get_timestamp(uint64_t *time);
#else
#define dp_pkt_add_timestamp(vdev, index, time, nbuf)

static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
#endif
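
/*
 * Usage sketch (illustrative): stamping a transmit payload with the current
 * time. The index is assumed to be a value of enum qdf_pkt_timestamp_index
 * from qdf_pkt_add_timestamp.h (QDF_PKT_TX_DRIVER_ENTRY is assumed here);
 * "vdev" and "nbuf" are hypothetical.
 *
 *	uint64_t ts;
 *
 *	dp_pkt_get_timestamp(&ts);
 *	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, ts, nbuf);
 */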
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats() - Update the increase or decrease in
 * outstanding tx desc count values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);

	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_tx_allowed) {
		dp_tx_info("queued packets are more than max tx, drop the frame");
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed) {
		dp_tx_info("queued packets are more than max tx, drop the frame");
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_exception_limit_check() - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
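
/*
 * Usage sketch (illustrative): how the limit check and the outstanding
 * counters pair up across a transmit path; the descriptor allocation call
 * in the middle is hypothetical.
 *
 *	if (dp_tx_limit_check(vdev))
 *		return nbuf;
 *	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *	...
 *	dp_tx_outstanding_inc(pdev);
 *	...on completion...
 *	dp_tx_outstanding_dec(pdev);
 */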
#else /* QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_TX_LIMIT_CHECK */
#endif /* __DP_TX_H */