/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"
#ifdef FEATURE_PERPKT_INFO
#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
    defined(QCA_TX_CAPTURE_SUPPORT) || \
    defined(QCA_MCOPY_SUPPORT)
#include "if_meta_hdr.h"
#endif
#endif
#include "dp_internal.h"
#include "hal_tx.h"
#include <qdf_tracepoint.h>
#ifdef CONFIG_SAWF
#include "dp_sawf.h"
#endif
#include <qdf_pkt_add_timestamp.h>

#define DP_INVALID_VDEV_ID 0xFF

#define DP_TX_MAX_NUM_FRAGS 6
/*
 * DP_TX_DESC_FLAG_FRAG must always be defined as 0x1;
 * do not change this flag's definition.
 */
#define DP_TX_DESC_FLAG_FRAG 0x1
#define DP_TX_DESC_FLAG_TO_FW 0x2
#define DP_TX_DESC_FLAG_SIMPLE 0x4
#define DP_TX_DESC_FLAG_RAW 0x8
#define DP_TX_DESC_FLAG_MESH 0x10
#define DP_TX_DESC_FLAG_QUEUED_TX 0x20
#define DP_TX_DESC_FLAG_COMPLETED_TX 0x40
#define DP_TX_DESC_FLAG_ME 0x80
#define DP_TX_DESC_FLAG_TDLS_FRAME 0x100
#define DP_TX_DESC_FLAG_ALLOCATED 0x200
#define DP_TX_DESC_FLAG_MESH_MODE 0x400
#define DP_TX_DESC_FLAG_UNMAP_DONE 0x800
#define DP_TX_DESC_FLAG_TX_COMP_ERR 0x1000
#define DP_TX_DESC_FLAG_FLUSH 0x2000
#define DP_TX_DESC_FLAG_TRAFFIC_END_IND 0x4000
#define DP_TX_DESC_FLAG_RMNET 0x8000
/*
 * The Tx descriptor flag field is only 16 bits wide and no bit is free
 * for any new flag. For the time being, the PPEDS flag is therefore
 * overloaded onto the FLUSH flag, and FLAG_FAST onto the TDLS flag
 * (TDLS is not enabled for WIN).
 */
#define DP_TX_DESC_FLAG_PPEDS 0x2000
#define DP_TX_DESC_FLAG_FAST 0x100

#define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1

#define DP_TX_FREE_SINGLE_BUF(soc, buf)                 \
do {                                                    \
    qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \
    qdf_nbuf_free(buf);                                 \
} while (0)
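
/*
 * Usage sketch (illustrative only, not part of this header): an error path
 * releasing a DMA-mapped Tx buffer with DP_TX_FREE_SINGLE_BUF, which unmaps
 * the nbuf and frees it in one step. The enqueue_failed condition and the
 * soc/nbuf variables are assumptions for the example.
 *
 *     if (qdf_unlikely(enqueue_failed)) {
 *         DP_TX_FREE_SINGLE_BUF(soc, nbuf);
 *         return QDF_STATUS_E_FAILURE;
 *     }
 */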
#define OCB_HEADER_VERSION 1

#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

#define DP_TX_QUEUE_MASK 0x3

#define MAX_CDP_SEC_TYPE 12

/* number of dwords for htt_tx_msdu_desc_ext2_t */
#define DP_TX_MSDU_INFO_META_DATA_DWORDS 7

#define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
#define dp_tx_info(params...) \
    __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
#define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)

#define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
#define dp_tx_comp_info(params...) \
    __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
#define dp_tx_comp_info_rl(params...) \
    __QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
#define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * struct dp_tx_frag_info_s - TX fragment information
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32 bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
    uint8_t *vaddr;
    uint32_t paddr_lo;
    uint16_t paddr_hi;
    uint16_t len;
};
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
    qdf_nbuf_t nbuf;
    uint16_t frag_cnt;
    uint16_t total_len;
    struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
    struct dp_tx_seg_info_s *next;
};

#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
    uint32_t num_segs;
    uint32_t total_len;
    struct dp_tx_seg_info_s *curr_seg;
};
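
/*
 * Usage sketch (illustrative only, not part of this header): building a
 * one-segment, one-fragment chain for a linear nbuf using the structures
 * above. The nbuf variable is an assumption for the example.
 *
 *     struct dp_tx_seg_info_s seg = { 0 };
 *     struct dp_tx_sg_info_s sg = { 0 };
 *     qdf_dma_addr_t paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 *
 *     seg.nbuf = nbuf;
 *     seg.frag_cnt = 1;
 *     seg.total_len = qdf_nbuf_len(nbuf);
 *     seg.frags[0].vaddr = qdf_nbuf_data(nbuf);
 *     seg.frags[0].paddr_lo = (uint32_t)paddr;
 *     seg.frags[0].paddr_hi = (uint16_t)(paddr >> 32);
 *     seg.frags[0].len = seg.total_len;
 *     seg.next = NULL;
 *
 *     sg.num_segs = 1;
 *     sg.total_len = seg.total_len;
 *     sg.curr_seg = &seg;
 */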
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
    uint8_t desc_pool_id;
    uint8_t ring_id;
};
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @exception_fw: Duplicate frame to be sent to firmware
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 * @u.tso_info: TSO information for TSO frame types
 *              (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @gsn: global sequence for reinjected mcast packets
 * @vdev_id: vdev_id for reinjected mcast packets
 * @skip_hp_update: Skip HP update for TSO segments and update in last segment
 * @buf_len: buffer length (RMNET optimization)
 * @payload_addr: payload address (RMNET optimization)
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 */
struct dp_tx_msdu_info_s {
    enum dp_tx_frm_type frm_type;
    struct dp_tx_queue tx_queue;
    uint32_t num_seg;
    uint8_t tid;
    uint8_t exception_fw;
    uint8_t is_tx_sniffer;
    union {
        struct qdf_tso_info_t tso_info;
        struct dp_tx_sg_info_s sg_info;
    } u;
    uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
    uint16_t ppdu_cookie;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
    uint16_t gsn;
    uint8_t vdev_id;
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
    uint8_t skip_hp_update;
#endif
#ifdef QCA_DP_TX_RMNET_OPTIMIZATION
    uint16_t buf_len;
    uint8_t *payload_addr;
#endif
};
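
/*
 * Usage sketch (illustrative only, not part of this header): minimal setup of
 * dp_tx_msdu_info_s for a regular (non-TSO, non-SG) frame before handing it
 * to dp_tx_send_msdu_single(). The vdev/nbuf variables are assumptions, and
 * the HTT_TX_EXT_TID_INVALID / HTT_INVALID_PEER defaults are assumed to be
 * available from the HTT headers.
 *
 *     struct dp_tx_msdu_info_s msdu_info;
 *
 *     qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *     msdu_info.frm_type = dp_tx_frm_std;
 *     msdu_info.tid = HTT_TX_EXT_TID_INVALID;
 *     dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 *     nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
 *                                   HTT_INVALID_PEER, NULL);
 */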
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
 * @soc: core txrx context
 * @index: index of ring to deinit
 *
 * Deinit 1 TCL and 1 WBM2SW release ring on an as-needed basis using
 * the index of the respective TCL/WBM2SW release in the soc structure.
 * For example, if the index is 2 then &soc->tcl_data_ring[2]
 * and &soc->tx_comp_ring[2] will be deinitialized.
 *
 * Return: none
 */
void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
                             struct dp_tx_desc_s *comp_head, uint8_t ring_id);
qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
                               bool delayed_free);
void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
                         uint8_t tid, uint8_t ring_id);
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
                                  struct dp_tx_desc_s *tx_desc,
                                  struct hal_tx_completion_status *ts,
                                  struct dp_txrx_peer *txrx_peer,
                                  uint8_t ring_id);
void dp_tx_comp_process_desc(struct dp_soc *soc,
                             struct dp_tx_desc_s *desc,
                             struct hal_tx_completion_status *ts,
                             struct dp_txrx_peer *txrx_peer);
void dp_tx_reinject_handler(struct dp_soc *soc,
                            struct dp_vdev *vdev,
                            struct dp_tx_desc_s *tx_desc,
                            uint8_t *status,
                            uint8_t reinject_reason);
void dp_tx_inspect_handler(struct dp_soc *soc,
                           struct dp_vdev *vdev,
                           struct dp_tx_desc_s *tx_desc,
                           uint8_t *status);
void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
                                   uint32_t length, uint8_t tx_status,
                                   bool update);

#ifdef DP_UMAC_HW_RESET_SUPPORT
qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                          qdf_nbuf_t nbuf,
                          struct cdp_tx_exception_metadata *tx_exc_metadata);
#endif
#ifdef WLAN_SUPPORT_PPEDS
void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
#else
static inline
void dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
                                    qdf_nbuf_t nbuf);

qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
                                qdf_nbuf_t nbuf,
                                struct cdp_tx_exception_metadata *tx_exc);

qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
                                              uint8_t vdev_id,
                                              qdf_nbuf_t nbuf,
                                              struct cdp_tx_exception_metadata *tx_exc);

qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
                           qdf_nbuf_t nbuf);

qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
                       struct cdp_tx_exception_metadata *tx_exc_metadata);
/**
 * dp_tx_mcast_enhance() - Multicast enhancement on the Tx frame
 * @vdev: DP vdev handle
 * @nbuf: network buffer to be transmitted
 *
 * Return: true on success
 *         false on failure
 */
bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                         struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                    struct dp_tx_msdu_info_s *msdu_info);
#endif

#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                         enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif

int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
                            hal_ring_handle_t hal_srng, uint8_t ring_id,
                            uint32_t quota);
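
/*
 * Usage sketch (illustrative only, not part of this header): reaping up to a
 * quota of Tx completions for ring 0 from a service context. The int_ctx
 * variable and the quota of 64 are assumptions for the example.
 *
 *     uint32_t num_processed;
 *
 *     num_processed = dp_tx_comp_handler(int_ctx, soc,
 *                                        soc->tx_comp_ring[0].hal_srng,
 *                                        0, 64);
 */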
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

QDF_STATUS
dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
static inline void dp_tx_me_exit(struct dp_pdev *pdev)
{
    return;
}
#endif
/**
 * dp_tx_pdev_init() - dp tx pdev init
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    /* Initialize Flow control counters */
    qdf_atomic_init(&pdev->num_tx_outstanding);
    pdev->tx_descs_max = 0;
    if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
        /* Initialize descriptors in TCL Ring */
        hal_tx_init_data_ring(soc->hal_soc,
                              soc->tcl_data_ring[pdev->pdev_id].hal_srng);
    }

    return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
 * @soc: DP soc handle
 * @hal_soc: HAL SOC handle
 * @num_avail_for_reap: descriptors available for reap
 * @hal_ring_hdl: ring pointer
 * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
 * @last_prefetched_sw_desc: pointer to last prefetched SW desc
 *
 * Return: None
 */
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
                                    hal_soc_handle_t hal_soc,
                                    uint32_t num_avail_for_reap,
                                    hal_ring_handle_t hal_ring_hdl,
                                    void **last_prefetched_hw_desc,
                                    struct dp_tx_desc_s **last_prefetched_sw_desc)
{
    if (*last_prefetched_sw_desc) {
        qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
        qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
    }

    if (num_avail_for_reap && *last_prefetched_hw_desc) {
        soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
                                                       *last_prefetched_hw_desc,
                                                       last_prefetched_sw_desc);
        /*
         * Two 32-byte descriptors share one 64-byte cache line. When the
         * current HW descriptor is not cache-line aligned, the next one
         * starts a new line and is prefetched; otherwise it lies in the
         * already-fetched line and the pointer simply advances.
         */
        if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
            *last_prefetched_hw_desc =
                hal_srng_dst_prefetch_next_cached_desc(hal_soc,
                                                       hal_ring_hdl,
                                                       (uint8_t *)*last_prefetched_hw_desc);
        else
            *last_prefetched_hw_desc =
                hal_srng_dst_get_next_32_byte_desc(hal_soc,
                                                   hal_ring_hdl,
                                                   (uint8_t *)*last_prefetched_hw_desc);
    }
}
#else
static inline
void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
                                    hal_soc_handle_t hal_soc,
                                    uint32_t num_avail_for_reap,
                                    hal_ring_handle_t hal_ring_hdl,
                                    void **last_prefetched_hw_desc,
                                    struct dp_tx_desc_s **last_prefetched_sw_desc)
{
}
#endif
#ifndef FEATURE_WDS
static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
{
    return;
}
#endif

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
                             qdf_nbuf_t nbuf,
                             struct dp_tx_msdu_info_s *msdu_info)
{
    return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
#else
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
                             qdf_nbuf_t nbuf,
                             struct dp_tx_msdu_info_s *msdu_info);

void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);

void dp_tx_remove_vlan_tag(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
                                 struct dp_tx_msdu_info_s *msdu_info,
                                 uint16_t group_key);
#endif
/**
 * dp_tx_hw_to_qdf() - convert hw status to qdf status
 * @status: hw status
 *
 * Return: qdf tx rx status
 */
static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
{
    switch (status) {
    case HAL_TX_TQM_RR_FRAME_ACKED:
        return QDF_TX_RX_STATUS_OK;
    case HAL_TX_TQM_RR_REM_CMD_TX:
        return QDF_TX_RX_STATUS_NO_ACK;
    case HAL_TX_TQM_RR_REM_CMD_REM:
    case HAL_TX_TQM_RR_REM_CMD_NOTX:
    case HAL_TX_TQM_RR_REM_CMD_AGED:
        return QDF_TX_RX_STATUS_FW_DISCARD;
    default:
        return QDF_TX_RX_STATUS_DEFAULT;
    }
}
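
/*
 * Usage sketch (illustrative only, not part of this header): translating the
 * TQM release reason from a Tx completion into the qdf status space. The ts
 * variable (a filled hal_tx_completion_status) is an assumption.
 *
 *     enum qdf_dp_tx_rx_status qdf_status;
 *
 *     qdf_status = dp_tx_hw_to_qdf(ts->status);
 *     if (qdf_status == QDF_TX_RX_STATUS_FW_DISCARD)
 *         dp_tx_debug("frame was removed by TQM without transmission");
 */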
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * TX packet queue has 2 instances: software descriptor pool id and dma ring id.
 * Based on the tx feature and hardware configuration, the queue id combination
 * can differ.
 * For example:
 * With XPS enabled, all TX descriptor pools and dma rings are assigned per
 * cpu id.
 * With no XPS and lock-based resource protection, descriptor pool ids are
 * different for each vdev and the dma ring id is the same as the single
 * pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
                                   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
    queue->ring_id = qdf_get_cpu();
    queue->desc_pool_id = queue->ring_id;
}

/**
 * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: HAL ring handle
 */
static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
                                                       uint8_t ring_id)
{
    if (ring_id == soc->num_tcl_data_rings)
        return soc->tcl_cmd_credit_ring.hal_srng;

    return soc->tcl_data_ring[ring_id].hal_srng;
}
#else /* QCA_OL_TX_MULTIQ_SUPPORT */
#ifdef TX_MULTI_TCL
#ifdef IPA_OFFLOAD
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
                                   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
    /* get flow id */
    queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
    if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
        queue->ring_id = DP_TX_GET_RING_ID(vdev);
    else
        queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
                          vdev->pdev->soc->num_tcl_data_rings);
}
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
                                   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
    /* get flow id */
    queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
    queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
                      vdev->pdev->soc->num_tcl_data_rings);
}
#endif
#else
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
                                   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
    /* get flow id */
    queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
    queue->ring_id = DP_TX_GET_RING_ID(vdev);
}
#endif

static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
                                                       uint8_t ring_id)
{
    return soc->tcl_data_ring[ring_id].hal_srng;
}
#endif
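
/*
 * Usage sketch (illustrative only, not part of this header): resolving the
 * descriptor pool and ring ids for an nbuf, then fetching the matching TCL
 * ring handle. The vdev/nbuf/soc variables are assumptions for the example.
 *
 *     struct dp_tx_queue queue;
 *     hal_ring_handle_t hal_ring_hdl;
 *
 *     dp_tx_get_queue(vdev, nbuf, &queue);
 *     hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, queue.ring_id);
 */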
#ifdef QCA_OL_TX_LOCK_LESS_ACCESS
/**
 * dp_tx_hal_ring_access_start() - hal_tx_ring access start for data
 * transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: ring access status
 */
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
                                              hal_ring_handle_t hal_ring_hdl)
{
    return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end() - hal_tx_ring access end for data transmission
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
                                             hal_ring_handle_t hal_ring_hdl)
{
    hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access end in reap mode
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
                                                  hal_ring_handle_t hal_ring_hdl)
{
}
#else
static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
                                              hal_ring_handle_t hal_ring_hdl)
{
    return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
                                             hal_ring_handle_t hal_ring_hdl)
{
    hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
}

static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
                                                  hal_ring_handle_t hal_ring_hdl)
{
    hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
#endif
#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
    ((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

/* TODO TX_FEATURE_NOT_YET */
static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
{
    return;
}
/* TODO TX_FEATURE_NOT_YET */

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
                      bool force_free);
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
                             uint32_t buf_type);
#else /* QCA_HOST_MODE_WIFI_DISABLED */
static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
    return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
    return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
}

static inline
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
                      bool force_free)
{
}

static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
    return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
    return QDF_STATUS_SUCCESS;
}

static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
    defined(QCA_TX_CAPTURE_SUPPORT) || \
    defined(QCA_MCOPY_SUPPORT)
#ifdef FEATURE_PERPKT_INFO
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
                                       struct dp_pdev *pdev,
                                       struct dp_txrx_peer *peer,
                                       struct hal_tx_completion_status *ts,
                                       qdf_nbuf_t netbuf,
                                       uint64_t time_latency);

void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
                                 uint16_t peer_id, uint32_t ppdu_id,
                                 qdf_nbuf_t netbuf);
#endif
#else
static inline
QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
                                                  struct dp_pdev *pdev,
                                                  struct dp_txrx_peer *peer,
                                                  struct hal_tx_completion_status *ts,
                                                  qdf_nbuf_t netbuf,
                                                  uint64_t time_latency)
{
    return QDF_STATUS_E_NOSUPPORT;
}

static inline
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
                                 uint16_t peer_id, uint32_t ppdu_id,
                                 qdf_nbuf_t netbuf)
{
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
                                       struct dp_tx_desc_s *desc,
                                       struct hal_tx_completion_status *ts);
#else
static inline void
dp_send_completion_to_pkt_capture(struct dp_soc *soc,
                                  struct dp_tx_desc_s *desc,
                                  struct hal_tx_completion_status *ts)
{
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Returns: none
 */
void dp_tx_update_stats(struct dp_soc *soc,
                        struct dp_tx_desc_s *tx_desc,
                        uint8_t ring_id);
/**
 * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
 * @soc: Datapath soc handle
 * @vdev: DP vdev handle
 * @tx_desc: tx packet descriptor
 * @tid: TID for pkt transmission
 * @msdu_info: MSDU info of tx packet
 * @ring_id: TCL ring id
 *
 * Returns: 1, if coalescing is to be done
 *          0, if coalescing is not to be done
 */
int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
                         struct dp_tx_desc_s *tx_desc,
                         uint8_t tid,
                         struct dp_tx_msdu_info_s *msdu_info,
                         uint8_t ring_id);
/**
 * dp_tx_ring_access_end() - HAL ring access end for data transmission
 * @soc: Datapath soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: Coalesce the current write or not
 *
 * Returns: none
 */
void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
                      int coalesce);
#else
/**
 * dp_tx_update_stats() - Update soc level tx stats
 * @soc: DP soc handle
 * @tx_desc: TX descriptor reference
 * @ring_id: TCL ring id
 *
 * Returns: none
 */
static inline void dp_tx_update_stats(struct dp_soc *soc,
                                      struct dp_tx_desc_s *tx_desc,
                                      uint8_t ring_id)
{
}

static inline void
dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
                      int coalesce)
{
    dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
}

static inline int
dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
                         struct dp_tx_desc_s *tx_desc,
                         uint8_t tid,
                         struct dp_tx_msdu_info_s *msdu_info,
                         uint8_t ring_id)
{
    return 0;
}
#endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
 * @soc_hdl: DP soc handle
 * @is_high_tput: flag to indicate whether throughput is high
 *
 * Returns: none
 */
static inline
void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
                                         bool is_high_tput)
{
    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

    qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
}

void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
                              hal_ring_handle_t hal_ring_hdl,
                              int coalesce);
#else
#ifdef DP_POWER_SAVE
void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
                              hal_ring_handle_t hal_ring_hdl,
                              int coalesce);
#else
static inline void
dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
                              hal_ring_handle_t hal_ring_hdl,
                              int coalesce)
{
    dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
}
#endif

static inline void
dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
                                    bool is_high_tput)
{
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
                         hal_ring_handle_t hal_ring_hdl,
                         struct dp_soc *soc, uint8_t ring_id)
{
    struct dp_tx_hw_desc_history *tx_hw_desc_history =
        &soc->tx_hw_desc_history;
    struct dp_tx_hw_desc_evt *evt;
    uint32_t idx = 0;
    uint16_t slot = 0;

    if (!tx_hw_desc_history->allocated)
        return;

    dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
                                     &slot,
                                     DP_TX_HW_DESC_HIST_SLOT_SHIFT,
                                     DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
                                     DP_TX_HW_DESC_HIST_MAX);

    evt = &tx_hw_desc_history->entry[slot][idx];
    qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
    evt->posted = qdf_get_log_timestamp();
    evt->tcl_ring_id = ring_id;
    hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
}
#else
static inline void
dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
                         hal_ring_handle_t hal_ring_hdl,
                         struct dp_soc *soc, uint8_t ring_id)
{
}
#endif
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
/**
 * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
 * @ts: Tx completion status
 * @delta_tsf: Difference between TSF clock and qtimer
 * @delay_us: Delay in microseconds
 *
 * Return: QDF_STATUS_SUCCESS : Success
 *         QDF_STATUS_E_INVAL : Tx completion status is invalid or
 *                              delay_us is NULL
 *         QDF_STATUS_E_FAILURE : Error in delay calculation
 */
QDF_STATUS
dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
                          uint32_t delta_tsf,
                          uint32_t *delay_us);

/**
 * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @delta_tsf: difference between TSF clock and qtimer
 *
 * Return: None
 */
void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                      uint32_t delta_tsf);
#endif
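
/*
 * Usage sketch (illustrative only, not part of this header): deriving the
 * hardware Tx completion delay from a completion status. Reading delta_tsf
 * from the vdev is an assumption for the example.
 *
 *     uint32_t delay_us = 0;
 *
 *     if (QDF_IS_STATUS_SUCCESS(dp_tx_compute_hw_delay_us(ts,
 *                                                         vdev->delta_tsf,
 *                                                         &delay_us)))
 *         dp_tx_debug("hw completion delay: %u us", delay_us);
 */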
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
/**
 * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @enable: true to enable and false to disable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
                                      uint8_t vdev_id, bool enable);

/**
 * dp_get_uplink_delay() - Get uplink delay value
 * @soc_hdl: cdp soc pointer
 * @vdev_id: vdev id
 * @val: pointer to save uplink delay value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                               uint32_t *val);
#endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
/**
 * dp_tx_pkt_tracepoints_enabled() - Get the state of tx pkt tracepoint
 *
 * Return: True if any tx pkt tracepoint is enabled else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
    return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
            qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
            qdf_trace_dp_tx_comp_pkt_enabled());
}
#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
    tx_desc->timestamp_tick = qdf_system_ticks();
}

/**
 * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Check for corruption in the tx descriptor; if the magic pattern does not
 * match, trigger self recovery
 *
 * Return: none
 */
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
#else
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
#endif
#ifndef CONFIG_SAWF
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
    return false;
}
#endif
#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Return: true when descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
                               struct dp_tx_desc_s *tx_desc)
{
    if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
        qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
        qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
        qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
        qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev))) {
        tx_desc->timestamp = qdf_ktime_real_get();
        return true;
    }
    return false;
}
#else
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
                               struct dp_tx_desc_s *tx_desc)
{
    if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
        qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
        qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
        qdf_unlikely(vdev->pdev->soc->peerstats_enabled)) {
        tx_desc->timestamp = qdf_ktime_real_get();
        return true;
    }
    return false;
}
#endif
#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
/**
 * dp_pkt_add_timestamp() - add timestamp in data payload
 * @vdev: dp vdev
 * @index: index to decide offset in payload
 * @time: timestamp to add in data payload
 * @nbuf: network buffer
 *
 * Return: none
 */
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
                          enum qdf_pkt_timestamp_index index, uint64_t time,
                          qdf_nbuf_t nbuf);

/**
 * dp_pkt_get_timestamp() - get current system time
 * @time: return current system time
 *
 * Return: none
 */
void dp_pkt_get_timestamp(uint64_t *time);
#else
#define dp_pkt_add_timestamp(vdev, index, time, nbuf)

static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
#endif
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats() - Update the outstanding tx desc count and its
 * high watermark on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
    int32_t tx_descs_cnt =
        qdf_atomic_read(&pdev->num_tx_outstanding);

    if (pdev->tx_descs_max < tx_descs_cnt)
        pdev->tx_descs_max = tx_descs_cnt;

    qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
                               pdev->tx_descs_max);
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_get_global_desc_in_use() - read global descriptors in usage
 * @dp_global: Datapath global context
 *
 * Return: global descriptors in use
 */
static inline int32_t
dp_tx_get_global_desc_in_use(struct dp_global_context *dp_global)
{
    return qdf_atomic_read(&dp_global->global_descriptor_in_use);
}
#endif

#ifdef QCA_TX_LIMIT_CHECK
static inline bool is_spl_packet(qdf_nbuf_t nbuf)
{
    if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
        return true;
    return false;
}
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * is_dp_spl_tx_limit_reached() - Check whether a special packet may still be
 * allocated a descriptor, i.e. whether the allocated tx descriptors are
 * within the global max limit and the pdev max limit
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max configured value
 * (or the packet is not special), else false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;
    struct dp_global_context *dp_global;
    uint32_t global_tx_desc_allowed;

    dp_global = wlan_objmgr_get_global_ctx();
    global_tx_desc_allowed =
        wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);

    if (is_spl_packet(nbuf)) {
        if (dp_tx_get_global_desc_in_use(dp_global) >=
            global_tx_desc_allowed)
            return true;

        if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
            pdev->num_tx_allowed)
            return true;

        return false;
    }

    return true;
}

/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached the global
 * max reg limit and pdev max reg limit for regular packets. Also check if the
 * limit is reached for special packets.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max limit for regular
 * packets and, in case of special packets, if the limit reached the max
 * configured value for the soc/pdev, else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;
    struct dp_global_context *dp_global;
    uint32_t global_tx_desc_allowed;
    uint32_t global_tx_desc_reg_allowed;
    uint32_t global_tx_desc_spcl_allowed;

    dp_global = wlan_objmgr_get_global_ctx();
    global_tx_desc_allowed =
        wlan_cfg_get_num_global_tx_desc(soc->wlan_cfg_ctx);
    global_tx_desc_spcl_allowed =
        wlan_cfg_get_num_global_spcl_tx_desc(soc->wlan_cfg_ctx);
    global_tx_desc_reg_allowed = global_tx_desc_allowed -
        global_tx_desc_spcl_allowed;

    if (dp_tx_get_global_desc_in_use(dp_global) >=
        global_tx_desc_reg_allowed) {
        if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
            dp_tx_info("queued packets are more than max tx, drop the frame");
            DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
            return true;
        }
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
        pdev->num_reg_tx_allowed) {
        if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
            dp_tx_info("queued packets are more than max tx, drop the frame");
            DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
            DP_STATS_INC(vdev,
                         tx_i.dropped.desc_na_exc_outstand.num, 1);
            return true;
        }
    }
    return false;
}
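
/*
 * Worked example for the checks above (with assumed configuration values):
 * if global_tx_desc_allowed = 1024 and global_tx_desc_spcl_allowed = 64,
 * then global_tx_desc_reg_allowed = 960. Regular frames start being dropped
 * once 960 descriptors are in use, while special frames such as EAPOL may
 * keep allocating until the full 1024 global limit (or the pdev limit) is
 * reached.
 */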
#else
/**
 * is_dp_spl_tx_limit_reached() - Check whether a special packet may still be
 * allocated a descriptor, i.e. whether the allocated tx descriptors are
 * within the soc max limit and the pdev max limit
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max configured value
 * (or the packet is not special), else false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (is_spl_packet(nbuf)) {
        if (qdf_atomic_read(&soc->num_tx_outstanding) >=
            soc->num_tx_allowed)
            return true;

        if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
            pdev->num_tx_allowed)
            return true;

        return false;
    }

    return true;
}

/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached the soc max
 * reg limit and pdev max reg limit for regular packets. Also check if the
 * limit is reached for special packets.
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached max limit for regular
 * packets and, in case of special packets, if the limit reached the max
 * configured value for the soc/pdev, else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_outstanding) >=
        soc->num_reg_tx_allowed) {
        if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
            dp_tx_info("queued packets are more than max tx, drop the frame");
            DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
            return true;
        }
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
        pdev->num_reg_tx_allowed) {
        if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
            dp_tx_info("queued packets are more than max tx, drop the frame");
            DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
            DP_STATS_INC(vdev,
                         tx_i.dropped.desc_na_exc_outstand.num, 1);
            return true;
        }
    }
    return false;
}
#endif
/**
 * dp_tx_exception_limit_check() - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_exception) >=
        soc->num_msdu_exception_desc) {
        dp_info("exc packets are more than max drop the exc pkt");
        DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
        return true;
    }
    return false;
}
#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on global
 * and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_global_context *dp_global;

    dp_global = wlan_objmgr_get_global_ctx();

    qdf_atomic_inc(&dp_global->global_descriptor_in_use);
    qdf_atomic_inc(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on global
 * and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_global_context *dp_global;

    dp_global = wlan_objmgr_get_global_ctx();

    qdf_atomic_dec(&dp_global->global_descriptor_in_use);
    qdf_atomic_dec(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#else
/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_inc(&pdev->num_tx_outstanding);
    qdf_atomic_inc(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_dec(&pdev->num_tx_outstanding);
    qdf_atomic_dec(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_SUPPORT_DP_GLOBAL_CTX */
#else /* QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    qdf_atomic_inc(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    qdf_atomic_dec(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_TX_LIMIT_CHECK */
/**
 * dp_tx_get_pkt_len() - Get the packet length of an msdu
 * @tx_desc: tx descriptor
 *
 * Return: Packet length of the msdu. If the packet is fragmented,
 * it returns the length of the single fragment.
 *
 * In TSO mode, the msdu from the stack is fragmented into small
 * fragments and each of these new fragments is transmitted
 * as an individual msdu.
 *
 * Note that the length of the msdu from the stack may be smaller
 * than the total length of the fragments it has been split into,
 * because each fragment carries its own nbuf header.
 */
static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
{
    return tx_desc->frm_type == dp_tx_frm_tso ?
           tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
           qdf_nbuf_len(tx_desc->nbuf);
}
#endif /* __DP_TX_H */