/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"

/* Maximum number of fragments tracked per Tx segment descriptor */
#define DP_TX_MAX_NUM_FRAGS 6

/*
 * Tx descriptor flag bits (OR-able mask). Names are the contract here;
 * exact consumer semantics live in dp_tx.c.
 */
#define DP_TX_DESC_FLAG_ALLOCATED 0x1     /* descriptor is in use */
#define DP_TX_DESC_FLAG_TO_FW 0x2         /* frame routed to firmware */
#define DP_TX_DESC_FLAG_FRAG 0x4          /* scatter-gather fragment frame */
#define DP_TX_DESC_FLAG_RAW 0x8           /* raw frame */
#define DP_TX_DESC_FLAG_MESH 0x10         /* mesh frame */
#define DP_TX_DESC_FLAG_QUEUED_TX 0x20    /* queued for transmission */
#define DP_TX_DESC_FLAG_COMPLETED_TX 0x40 /* transmission completed */
#define DP_TX_DESC_FLAG_ME 0x80           /* multicast-enhancement frame */
#define DP_TX_DESC_FLAG_TDLS_FRAME 0x100  /* TDLS frame */

/* Unmap a DMA-mapped nbuf and free it */
#define DP_TX_FREE_SINGLE_BUF(soc, buf) \
do { \
qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \
qdf_nbuf_free(buf); \
} while (0)

#define OCB_HEADER_VERSION 1

/*
 * Descriptor-pool and TCL-ring selection. Depending on the build, Tx
 * descriptor pools are maintained per pdev or per vdev; the ring id is
 * always derived from the pdev id here.
 */
#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/* Mask applied to the skb queue mapping when picking a Tx queue */
#define DP_TX_QUEUE_MASK 0x3
/**
 * struct dp_tx_frag_info_s - per-fragment buffer information
 * @vaddr: HLOS virtual address for buffer
 * @paddr_lo: physical address, lower 32 bits
 * @paddr_hi: physical address, higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
uint8_t *vaddr;
uint32_t paddr_lo;
uint16_t paddr_hi;
uint16_t len;
};
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-fragment information (at most DP_TX_MAX_NUM_FRAGS entries)
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
qdf_nbuf_t nbuf;
uint16_t frag_cnt;
uint16_t total_len;
struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
struct dp_tx_seg_info_s *next;
};
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames
 * to carry fragmentation information.
 * Raw frames will be handed over to driver as an SKB chain with MPDU
 * boundaries indicated through flags in SKB CB (first_msdu and last_msdu).
 * This will be converted into a set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
uint32_t num_segs;
uint32_t total_len;
struct dp_tx_seg_info_s *curr_seg;
};
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
uint8_t desc_pool_id;
uint8_t ring_id;
};
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 * (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 *
 */
struct dp_tx_msdu_info_s {
enum dp_tx_frm_type frm_type;
struct dp_tx_queue tx_queue;
uint32_t num_seg;
uint8_t tid;
union {
struct qdf_tso_info_t tso_info;
struct dp_tx_sg_info_s sg_info;
} u;
uint32_t meta_data[7];
uint8_t exception_fw;
uint16_t ppdu_cookie;
uint8_t is_tx_sniffer;
};
/* Per-vdev / per-soc Tx setup and teardown */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc);

/**
 * dp_tso_soc_attach() - TSO Attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(void *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(void *txrx_soc);

QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);

/* Regular, exception and mesh transmit entry points (cdp interface) */
qdf_nbuf_t dp_tx_send(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf);
qdf_nbuf_t dp_tx_send_exception(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf,
struct cdp_tx_exception_metadata *tx_exc);
qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf);
/* Single-segment MSDU transmit path */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
struct cdp_tx_exception_metadata *tx_exc_metadata);

/*
 * NOTE(review): with QDF_LOCK_STATS the multi-segment path is declared
 * noinline - presumably so lock statistics attribute to this function;
 * confirm against the definition in dp_tx.c.
 */
#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info);
#endif
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of vdev
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif
/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_srng, uint8_t ring_id,
uint32_t quota);

/* Prepare an ME (cf. DP_TX_DESC_FLAG_ME) frame for transmission */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
  220. #ifndef FEATURE_WDS
  221. static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
  222. {
  223. return;
  224. }
  225. #endif
  226. #ifndef ATH_SUPPORT_IQUE
  227. static inline void dp_tx_me_exit(struct dp_pdev *pdev)
  228. {
  229. return;
  230. }
  231. #endif
#ifndef QCA_MULTIPASS_SUPPORT
/**
 * dp_tx_multipass_process() - multipass Tx processing stub
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: frame to be transmitted
 * @msdu_info: MSDU descriptor for the frame
 *
 * Return: true unconditionally when multipass support is compiled out
 */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info)
{
return true;
}

/* No-op when multipass support is compiled out */
static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
#else
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf,
struct dp_tx_msdu_info_s *msdu_info);
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
#endif
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * TX packet queue has 2 instances, software descriptors id and dma ring id.
 * Based on tx feature and hardware configuration queue id combination
 * could be different.
 * For example -
 * With XPS enabled, all TX descriptor pools and dma ring are assigned
 * per cpu id.
 * With no XPS, lock based resource protection, Descriptor pool ids are
 * different for each vdev, dma ring id will be same as single pdev id.
 *
 * Return: None
 */
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
/* Both ids are derived from the skb queue mapping (multi-queue case) */
uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
DP_TX_QUEUE_MASK;
queue->desc_pool_id = queue_offset;
queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s, pool_id:%d ring_id: %d",
__func__, queue->desc_pool_id, queue->ring_id);
}
#else /* QCA_OL_TX_MULTIQ_SUPPORT */
static inline void dp_tx_get_queue(struct dp_vdev *vdev,
qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
{
/* get flow id */
queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
queue->ring_id = DP_TX_GET_RING_ID(vdev);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s, pool_id:%d ring_id: %d",
__func__, queue->desc_pool_id, queue->ring_id);
}
#endif
#ifdef FEATURE_PERPKT_INFO
/* Per-packet Tx completion indications towards the upper layers */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
struct dp_pdev *pdev,
struct dp_peer *peer,
struct hal_tx_completion_status *ts,
qdf_nbuf_t netbuf,
uint64_t time_latency);
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
uint16_t peer_id, uint32_t ppdu_id,
qdf_nbuf_t netbuf);
#endif

void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);

/*
 * DP_TX_TID_OVERRIDE - when ATH_TX_PRI_OVERRIDE is enabled, overwrite the
 * MSDU's tid with the nbuf priority; otherwise expands to nothing.
 */
#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif

void
dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
uint32_t buf_type);
  312. /* TODO TX_FEATURE_NOT_YET */
  313. static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
  314. {
  315. return;
  316. }
  317. /* TODO TX_FEATURE_NOT_YET */
  318. #endif