/* dp_tx.h */
/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#ifndef __DP_TX_H
#define __DP_TX_H

#include <qdf_types.h>
#include <qdf_nbuf.h>
#include "dp_types.h"

/* Maximum number of fragments carried by one dp_tx_seg_info_s */
#define DP_TX_MAX_NUM_FRAGS 6

/*
 * TX descriptor flag bits — NOTE(review): presumably stored in the TX
 * descriptor's flags field declared in dp_types.h; confirm there.
 */
#define DP_TX_DESC_FLAG_ALLOCATED 0x1    /* descriptor is allocated / in use */
#define DP_TX_DESC_FLAG_TO_FW 0x2        /* frame is also routed to firmware */
#define DP_TX_DESC_FLAG_FRAG 0x4         /* fragmented (scatter-gather) frame */
#define DP_TX_DESC_FLAG_RAW 0x8          /* raw (802.11) frame */
#define DP_TX_DESC_FLAG_MESH 0x10        /* mesh frame */
#define DP_TX_DESC_FLAG_QUEUED_TX 0x20   /* frame queued for transmit */
#define DP_TX_DESC_FLAG_COMPLETED_TX 0x40 /* transmit completed */
#define DP_TX_DESC_FLAG_ME 0x80          /* multicast-enhancement frame */
#define DP_TX_DESC_FLAG_TDLS_FRAME 0x100 /* TDLS frame */

/* DMA-unmap (TO_DEVICE direction) and free a single network buffer */
#define DP_TX_FREE_SINGLE_BUF(soc, buf) \
do { \
qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE); \
qdf_nbuf_free(buf); \
} while (0)

/* OCB (Outside the Context of a BSS) header version */
#define OCB_HEADER_VERSION 1
/*
 * Descriptor-pool / TCL-ring selection policy:
 * - TX_PER_PDEV_DESC_POOL: pool follows the pdev (or the vdev when
 *   QCA_LL_TX_FLOW_CONTROL_V2 is enabled); ring follows the pdev.
 * - TX_PER_VDEV_DESC_POOL: pool follows the vdev; ring follows the pdev.
 * NOTE(review): when neither option is defined, DP_TX_GET_DESC_POOL_ID and
 * DP_TX_GET_RING_ID remain undefined here — presumably supplied by another
 * build configuration; confirm.
 */
#ifdef TX_PER_PDEV_DESC_POOL
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#else /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#else
#ifdef TX_PER_VDEV_DESC_POOL
#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
#endif /* TX_PER_VDEV_DESC_POOL */
#endif /* TX_PER_PDEV_DESC_POOL */

/* Mask applied to the skb queue mapping when picking a TX queue */
#define DP_TX_QUEUE_MASK 0x3
/**
 * struct dp_tx_frag_info_s - single TX buffer fragment descriptor
 * @vaddr: hlos virtual address for buffer
 * @paddr_lo: physical address lower 32bits
 * @paddr_hi: physical address higher bits
 * @len: length of the buffer
 */
struct dp_tx_frag_info_s {
	uint8_t *vaddr;
	uint32_t paddr_lo;
	uint16_t paddr_hi;
	uint16_t len;
};
/**
 * struct dp_tx_seg_info_s - Segmentation Descriptor
 * @nbuf: NBUF pointer if segment corresponds to separate nbuf
 * @frag_cnt: Fragment count in this segment
 * @total_len: Total length of segment
 * @frags: per-Fragment information
 * @next: pointer to next MSDU segment
 */
struct dp_tx_seg_info_s {
	qdf_nbuf_t nbuf;
	uint16_t frag_cnt;
	uint16_t total_len;
	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
	struct dp_tx_seg_info_s *next;
};
/**
 * struct dp_tx_sg_info_s - Scatter Gather Descriptor
 * @num_segs: Number of segments (TSO/ME) in the frame
 * @total_len: Total length of the frame
 * @curr_seg: Points to current segment descriptor to be processed. Chain of
 *            descriptors for SG frames/multicast-unicast converted packets.
 *
 * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
 * carry fragmentation information.
 * Raw Frames will be handed over to driver as an SKB chain with MPDU boundaries
 * indicated through flags in SKB CB (first_msdu and last_msdu). This will be
 * converted into set of skb sg (nr_frags) structures.
 */
struct dp_tx_sg_info_s {
	uint32_t num_segs;
	uint32_t total_len;
	struct dp_tx_seg_info_s *curr_seg;
};
/**
 * struct dp_tx_queue - Tx queue
 * @desc_pool_id: Descriptor Pool to be used for the tx queue
 * @ring_id: TCL descriptor ring ID corresponding to the tx queue
 *
 * Tx queue contains information of the software (Descriptor pool)
 * and hardware resources (TCL ring id) to be used for a particular
 * transmit queue (obtained from skb_queue_mapping in case of linux)
 */
struct dp_tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
};
/**
 * struct dp_tx_msdu_info_s - MSDU Descriptor
 * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
 * @tx_queue: Tx queue on which this MSDU should be transmitted
 * @num_seg: Number of segments (TSO)
 * @tid: TID (override) that is sent from HLOS
 * @u.tso_info: TSO information for TSO frame types
 *              (chain of the TSO segments, number of segments)
 * @u.sg_info: Scatter Gather information for non-TSO SG frames
 * @meta_data: Mesh meta header information
 * @exception_fw: Duplicate frame to be sent to firmware
 * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
 * @is_tx_sniffer: Indicates if the packet has to be sniffed
 *
 * This structure holds the complete MSDU information needed to program the
 * Hardware TCL and MSDU extension descriptors for different frame types
 */
struct dp_tx_msdu_info_s {
	enum dp_tx_frm_type frm_type;
	struct dp_tx_queue tx_queue;
	uint32_t num_seg;
	uint8_t tid;
	union {
		struct qdf_tso_info_t tso_info;
		struct dp_tx_sg_info_s sg_info;
	} u;
	uint32_t meta_data[7];
	uint8_t exception_fw;
	uint16_t ppdu_cookie;
	uint8_t is_tx_sniffer;
};
/* vdev-level TX attach/detach and HW search-flag refresh */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);

/* soc-level TX attach/detach */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc);
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc);
/**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: Opaque DP soc handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(void *txrx_soc);

/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: Opaque DP soc handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_detach(void *txrx_soc);

/* pdev-level TX attach/detach */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);

/* Standard TX entry point */
qdf_nbuf_t dp_tx_send(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf);

/* TX entry with caller-supplied exception metadata */
qdf_nbuf_t dp_tx_send_exception(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf,
				struct cdp_tx_exception_metadata *tx_exc);

/* TX entry for mesh frames */
qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *data_vdev, qdf_nbuf_t nbuf);

/* Transmit a single (non-segmented) MSDU */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata);
/*
 * dp_tx_send_msdu_multiple() - transmit a multi-segment MSDU chain.
 * Kept out-of-line when lock statistics are enabled — NOTE(review):
 * presumably so lock-stats call-site attribution stays accurate; confirm.
 */
#if QDF_LOCK_STATS
noinline qdf_nbuf_t
dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			 struct dp_tx_msdu_info_s *msdu_info);
#else
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info);
#endif

#ifdef FEATURE_WLAN_TDLS
/* Non-standard TX entry used for TDLS frames */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
#endif
/**
 * dp_tx_comp_handler() - Tx completion handler
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_srng: Opaque HAL SRNG pointer
 * @ring_id: completion ring id
 * @quota: No. of packets/descriptors that can be serviced in one loop
 *
 * This function will collect hardware release ring element contents and
 * handle descriptor contents. Based on contents, free packet or handle error
 * conditions
 *
 * Return: Number of TX completions processed
 */
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_srng, uint8_t ring_id,
			    uint32_t quota);

/*
 * Prepare an nbuf for multicast-enhancement transmit — NOTE(review):
 * name-derived summary; confirm against the definition.
 */
QDF_STATUS
dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
  210. #ifndef FEATURE_WDS
  211. static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
  212. {
  213. return;
  214. }
  215. #endif
  216. #ifndef ATH_SUPPORT_IQUE
  217. static inline void dp_tx_me_exit(struct dp_pdev *pdev)
  218. {
  219. return;
  220. }
  221. #endif
#ifndef QCA_MULTIPASS_SUPPORT
/*
 * Multipass support compiled out: dp_tx_multipass_process() always lets the
 * frame through (returns true) and the vdev deinit hook is a no-op.
 */
static inline
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info)
{
	return true;
}

static inline
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
{
}
#else
/* Real implementations provided when QCA_MULTIPASS_SUPPORT is enabled */
bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
			     qdf_nbuf_t nbuf,
			     struct dp_tx_msdu_info_s *msdu_info);
void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
#endif
/**
 * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @queue: queue ids container for nbuf
 *
 * TX packet queue has 2 instances, software descriptors id and dma ring id.
 * Based on tx feature and hardware configuration the queue id combination
 * could differ.
 * For example -
 * With XPS enabled, all TX descriptor pools and dma rings are assigned per
 * cpu id.
 * With no XPS and lock-based resource protection, descriptor pool ids are
 * different for each vdev, and the dma ring id is the same as the single
 * pdev id.
 *
 * Return: None
 */
  256. #ifdef QCA_OL_TX_MULTIQ_SUPPORT
  257. static inline void dp_tx_get_queue(struct dp_vdev *vdev,
  258. qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
  259. {
  260. uint16_t queue_offset = qdf_nbuf_get_queue_mapping(nbuf) &
  261. DP_TX_QUEUE_MASK;
  262. queue->desc_pool_id = queue_offset;
  263. queue->ring_id = vdev->pdev->soc->tx_ring_map[queue_offset];
  264. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  265. "%s, pool_id:%d ring_id: %d",
  266. __func__, queue->desc_pool_id, queue->ring_id);
  267. }
  268. #else /* QCA_OL_TX_MULTIQ_SUPPORT */
  269. static inline void dp_tx_get_queue(struct dp_vdev *vdev,
  270. qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
  271. {
  272. /* get flow id */
  273. queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
  274. queue->ring_id = DP_TX_GET_RING_ID(vdev);
  275. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  276. "%s, pool_id:%d ring_id: %d",
  277. __func__, queue->desc_pool_id, queue->ring_id);
  278. }
  279. #endif
#ifdef FEATURE_PERPKT_INFO
/* Per-packet TX completion indication to upper layers */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency);

void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf);
#endif

void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl);

/* Override the MSDU TID from the nbuf priority when the feature is on;
 * expands to nothing otherwise.
 */
#ifdef ATH_TX_PRI_OVERRIDE
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
#else
#define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
#endif
  299. /* TODO TX_FEATURE_NOT_YET */
  300. static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
  301. {
  302. return;
  303. }
/* TODO TX_FEATURE_NOT_YET */
#endif /* __DP_TX_H */