dp_rh_tx.c

/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
                                 struct dp_tx_msdu_info_s *msdu_info,
                                 uint32_t download_len)
{
        uint32_t frag0_len;
        uint32_t delta;
        uint32_t eit_hdr_len;

        frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
        download_len -= frag0_len;

        eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

        /* If EIT header length is less than the MSDU download length, then
         * adjust the download length to just hold EIT header.
         */
        if (eit_hdr_len < download_len) {
                delta = download_len - eit_hdr_len;
                download_len -= delta;
        }

        return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
                                 struct dp_tx_msdu_info_s *msdu_info,
                                 uint32_t download_len)
{
        return download_len;
}
#endif /* FEATURE_TSO */
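
/**
 * dp_tx_comp_get_params_from_hal_desc_rh() - Get TX desc from HAL comp desc
 * @soc: Handle to DP SoC structure
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: SW TX descriptor retrieved from the HAL descriptor
 *
 * No-op on this target: TX completions are delivered through HTT messages
 * over the Copy Engine (see dp_tx_compl_handler_rh()) rather than through
 * HAL completion ring descriptors, so there are no parameters to extract.
 *
 * Return: QDF_STATUS_SUCCESS
 */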
QDF_STATUS
dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
                                       void *tx_comp_hal_desc,
                                       struct dp_tx_desc_s **r_tx_desc)
{
        return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
        uint8_t pool_id;
        struct dp_tx_desc_s *tx_desc;

        pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
                        DP_TX_DESC_ID_POOL_OS;

        /* Find Tx descriptor */
        tx_desc = dp_tx_desc_find(soc, pool_id,
                                  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
                                                DP_TX_DESC_ID_PAGE_OS,
                                  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
                                                DP_TX_DESC_ID_OFFSET_OS, false);

        /* Pool id does not match - error */
        if (tx_desc && tx_desc->pool_id != pool_id) {
                dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
                                 pool_id, tx_desc->pool_id);
                qdf_assert_always(0);
        }

        return tx_desc;
}
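
/**
 * dp_tx_process_htt_completion_rh() - Process HTT TX completion
 * @soc: Handle to DP SoC structure
 * @tx_desc: SW TX descriptor
 * @status: TX completion status from the HTT descriptor
 * @ring_id: Ring id for which the completion was received
 *
 * Currently a stub on this target; HTT TX completions are handled in
 * dp_tx_compl_handler_rh().
 */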
void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
                                     struct dp_tx_desc_s *tx_desc,
                                     uint8_t *status,
                                     uint8_t ring_id)
{
}
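
/**
 * dp_tx_adjust_download_len_rh() - Adjust the packet download length
 * @nbuf: socket buffer
 * @download_len: requested download length (TCL descriptor + payload)
 *
 * Clamp the payload portion of the download to the actual nbuf length so
 * that frag0 (the TCL_DATA_CMD) plus the downloaded payload never exceeds
 * the packet size.
 *
 * Return: uint32_t (adjusted packet download length)
 */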
static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
        uint32_t frag0_len; /* TCL_DATA_CMD */
        uint32_t frag1_len; /* 64 byte payload */

        frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
        frag1_len = download_len - frag0_len;

        if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
                frag1_len = qdf_nbuf_len(nbuf);

        return frag0_len + frag1_len;
}
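
/**
 * dp_tx_fill_nbuf_data_attr_rh() - Fill CE data attributes for the nbuf
 * @nbuf: socket buffer
 *
 * Set the tx_classify bit and the packet offset (length of frag0, i.e. the
 * TCL descriptor) in the nbuf data attributes used to build the CE source
 * descriptor.
 */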
static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
        uint32_t pkt_offset;
        uint32_t tx_classify;
        uint32_t data_attr;

        /* Enable tx_classify bit in CE SRC DESC for all data packets */
        tx_classify = 1;
        pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

        data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
        data_attr |= pkt_offset << CE_DESC_PKT_OFFSET_BIT_S;

        qdf_nbuf_data_attr_set(nbuf, data_attr);
}
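
/**
 * dp_tx_record_hw_desc_rh() - Record the HW TX descriptor in history
 * @hal_tx_desc_cached: cached HAL TX descriptor that was posted
 * @soc: Handle to DP SoC structure
 *
 * Copy the posted TCL descriptor into the SoC TX HW descriptor history
 * (no-op when DP_TX_HW_DESC_HISTORY is not enabled).
 */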
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
        struct dp_tx_hw_desc_history *tx_hw_desc_history =
                                                &soc->tx_hw_desc_history;
        struct dp_tx_hw_desc_evt *evt;
        uint32_t idx = 0;
        uint16_t slot = 0;

        if (!tx_hw_desc_history->allocated)
                return;

        dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
                                         &slot,
                                         DP_TX_HW_DESC_HIST_SLOT_SHIFT,
                                         DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
                                         DP_TX_HW_DESC_HIST_MAX);

        evt = &tx_hw_desc_history->entry[slot][idx];
        qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
        evt->posted = qdf_get_log_timestamp();
        evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif
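
/**
 * dp_tx_update_write_index() - Update the CE TX ring write index
 * @soc: Handle to DP SoC structure
 * @tx_ep_info: TX endpoint info of the pdev
 * @coalesce: when set, defer the write index update (coalesce doorbells)
 *
 * Update the source ring write index of the CE used for TX, honouring the
 * runtime-PM / system power-save state: if the link is not awake, only a
 * flush event is recorded on the ring instead of the register write.
 */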
#if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc,
                                     struct dp_tx_ep_info_rh *tx_ep_info,
                                     int coalesce)
{
        int ret;

        /* Avoid runtime get and put APIs under high throughput scenarios */
        if (dp_get_rtpm_tput_policy_requirement(soc)) {
                ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
                                                    coalesce);
                return;
        }

        ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
        if (QDF_IS_STATUS_SUCCESS(ret)) {
                if (hif_system_pm_state_check(soc->hif_handle)) {
                        ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
                                          CE_RING_FLUSH_EVENT);
                        ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
                } else {
                        ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
                                                            coalesce);
                }
                hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
        } else {
                dp_runtime_get(soc);
                ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
                                  CE_RING_FLUSH_EVENT);
                ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
                qdf_atomic_inc(&soc->tx_pending_rtpm);
                dp_runtime_put(soc);
        }
}
#elif defined(DP_POWER_SAVE)
static void dp_tx_update_write_index(struct dp_soc *soc,
                                     struct dp_tx_ep_info_rh *tx_ep_info,
                                     int coalesce)
{
        if (hif_system_pm_state_check(soc->hif_handle)) {
                ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
                                  CE_RING_FLUSH_EVENT);
                ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
        } else {
                ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
                                                    coalesce);
        }
}
#else
static void dp_tx_update_write_index(struct dp_soc *soc,
                                     struct dp_tx_ep_info_rh *tx_ep_info,
                                     int coalesce)
{
        ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
                                            coalesce);
}
#endif
/**
 * dp_flush_tx_ring_rh() - Flush the TX ring write index
 * @pdev: dp pdev handle
 * @ring_id: TX ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
{
        struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
        struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
        int ret;

        ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
        ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
        if (ret) {
                ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
                ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
                                  CE_RING_FLUSH_EVENT);
                ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
                return ret;
        }

        ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
        ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
        hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

        return ret;
}
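
/**
 * dp_tx_hw_enqueue_rh() - Enqueue a TX descriptor to the HW (via CE)
 * @soc: Handle to DP SoC structure
 * @vdev: vdev the frame is transmitted on
 * @tx_desc: SW TX descriptor for the frame
 * @fw_metadata: metadata to be sent to the firmware in the TCL descriptor
 * @tx_exc_metadata: exception path metadata (may be NULL)
 * @msdu_info: MSDU information (TID, TSO info, etc.)
 *
 * Populate the TCL_DATA_CMD in a cached buffer, sync it into the
 * descriptor's coherent tcl_cmd memory, push it as frag 0 of the nbuf and
 * enqueue the nbuf on the CE TX ring so that the TCL command plus the
 * adjusted payload length are downloaded to the target.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *         QDF_STATUS_E_RESOURCES on failure
 */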
QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
                    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
                    struct cdp_tx_exception_metadata *tx_exc_metadata,
                    struct dp_tx_msdu_info_s *msdu_info)
{
        struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
        struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
        uint32_t download_len = tx_ep_info->download_len;
        qdf_nbuf_t nbuf = tx_desc->nbuf;
        uint8_t tid = msdu_info->tid;
        uint32_t *hal_tx_desc_cached;
        int coalesce = 0;
        int ret;
        /*
         * Use static initialization here instead of a qdf_mem_set() call
         * to avoid the extra memset jump.
         */
        uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
        enum cdp_sec_type sec_type = ((tx_exc_metadata &&
                        tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
                        tx_exc_metadata->sec_type : vdev->sec_type);
        QDF_STATUS status = QDF_STATUS_E_RESOURCES;

        if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
                dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
                return QDF_STATUS_E_RESOURCES;
        }

        hal_tx_desc_cached = (void *)cached_desc;

        hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
                                 tx_desc->dma_addr, 0, tx_desc->id,
                                 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
        hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
                                vdev->lmac_id);
        hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
                                    vdev->search_type);
        hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
                                     vdev->bss_ast_idx);
        hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
                                     sec_type_map[sec_type]);
        hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
                                      (vdev->bss_ast_hash & 0xF));
        hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
        hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
        hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
        hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
        hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
                                          vdev->hal_desc_addr_search_flags);

        if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
                hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

        /* verify checksum offload configuration */
        if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
            qdf_nbuf_is_tso(nbuf)) {
                hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
                hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
        }

        if (tid != HTT_TX_EXT_TID_INVALID)
                hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

        if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
                hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

        if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
                dp_tx_desc_set_timestamp(tx_desc);

        dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
                         tx_desc->length,
                         (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
                         (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
                         tx_desc->id);

        hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

        qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
                                (char *)tx_desc->tcl_cmd_vaddr,
                                tx_desc->tcl_cmd_paddr);

        download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

        if (qdf_nbuf_is_tso(nbuf)) {
                QDF_NBUF_CB_PADDR(nbuf) =
                        msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
                download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
                                                                download_len);
        }

        dp_tx_fill_nbuf_data_attr_rh(nbuf);

        ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
        ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
                              tx_ep_info->tx_endpoint, download_len);
        if (ret) {
                ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
                dp_verbose_debug("CE tx ring full");
                /* TODO: Should this be a separate ce_ring_full stat? */
                DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
                DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail, 1);
                goto enqueue_fail;
        }

        coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
                                            msdu_info, 0);

        dp_tx_update_write_index(soc, tx_ep_info, coalesce);
        ce_ring_release_lock(tx_ep_info->ce_tx_hdl);

        tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
        dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
        DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
                         tx_desc->length);
        DP_STATS_INC(soc, tx.tcl_enq[0], 1);

        dp_tx_update_stats(soc, tx_desc, 0);
        status = QDF_STATUS_SUCCESS;

        dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

enqueue_fail:
        dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
                             qdf_get_log_timestamp(), tx_desc->nbuf);

        return status;
}
/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *                                  based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
                             uint8_t pool_id)
{
        struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
        struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
        uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        qdf_dma_context_t memctx = 0;

        if (pool_id > MAX_TXDESC_POOLS - 1)
                return QDF_STATUS_E_INVAL;

        /* Allocate tcl descriptors in coherent memory */
        tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
        memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
        dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TCL_DESC_TYPE,
                                      &tcl_desc_pool->desc_pages,
                                      elem_size, num_elem, memctx, false);

        if (!tcl_desc_pool->desc_pages.num_pages) {
                dp_err("failed to allocate tcl desc Pages");
                status = QDF_STATUS_E_NOMEM;
                goto err_alloc_fail;
        }

        return status;

err_alloc_fail:
        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
                                     &tcl_desc_pool->desc_pages,
                                     memctx, false);
        return status;
}

/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
        struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
        struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
        qdf_dma_context_t memctx = 0;

        if (pool_id > MAX_TXDESC_POOLS - 1)
                return;

        tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
        memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

        dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
                                     &tcl_desc_pool->desc_pages,
                                     memctx, false);
}

/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *                                 based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
                            uint8_t pool_id)
{
        struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
        struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
        struct qdf_mem_dma_page_t *page_info;
        QDF_STATUS status;

        tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
        tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
        tcl_desc_pool->elem_count = num_elem;

        /* Link tcl descriptors into a freelist */
        if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
                                    tcl_desc_pool->elem_size,
                                    tcl_desc_pool->elem_count,
                                    false)) {
                dp_err("failed to link tcl desc Pages");
                status = QDF_STATUS_E_FAULT;
                goto err_link_fail;
        }

        page_info = tcl_desc_pool->desc_pages.dma_pages;
        tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

        return QDF_STATUS_SUCCESS;

err_link_fail:
        return status;
}

/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *                                   based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
}
/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
                                    struct dp_tx_desc_s *tx_desc,
                                    uint32_t index)
{
        struct qdf_mem_dma_page_t *dma_page;
        uint32_t page_id;
        uint32_t offset;

        tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

        if (tcl_desc_pool->freelist)
                tcl_desc_pool->freelist =
                        *((uint32_t **)tcl_desc_pool->freelist);

        page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
        offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
        dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];

        tx_desc->tcl_cmd_paddr =
                dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}
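
/**
 * dp_tx_desc_pool_init_rh() - Initialize the SW TX descriptor pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 * @spcl_tx_desc: true for a special descriptor pool
 *
 * Initialize the TCL, TX extension, TSO and TSO num-seg descriptor pools
 * for the given pool_id, then walk the SW TX descriptor freelist assigning
 * each descriptor its ID (pool/page/offset) and a TCL command buffer.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */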
QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
                                   uint32_t num_elem,
                                   uint8_t pool_id,
                                   bool spcl_tx_desc)
{
        struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
        uint32_t id, count, page_id, offset, pool_id_32;
        struct dp_tx_desc_s *tx_desc;
        struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
        struct dp_tx_desc_pool_s *tx_desc_pool;
        uint16_t num_desc_per_page;
        QDF_STATUS status;

        status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to initialise tcl desc pool %d", pool_id);
                goto err_out;
        }

        status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to initialise tx ext desc pool %d", pool_id);
                goto err_deinit_tcl_pool;
        }

        status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to initialise tso desc pool %d", pool_id);
                goto err_deinit_tx_ext_pool;
        }

        status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to initialise tso num seg pool %d", pool_id);
                goto err_deinit_tso_pool;
        }

        tx_desc_pool = &soc->tx_desc[pool_id];
        tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
        tx_desc = tx_desc_pool->freelist;
        count = 0;
        pool_id_32 = (uint32_t)pool_id;
        num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
        while (tx_desc) {
                page_id = count / num_desc_per_page;
                offset = count % num_desc_per_page;
                id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
                      (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

                tx_desc->id = id;
                tx_desc->pool_id = pool_id;
                dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
                dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
                tx_desc = tx_desc->next;
                count++;
        }

        return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
        dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
        dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
        dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
        /* TODO: is assert needed ? */
        qdf_assert_always(0);
        return status;
}
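
/**
 * dp_tx_desc_pool_deinit_rh() - De-initialize the SW TX descriptor pool
 * @soc: Handle to DP SoC structure
 * @tx_desc_pool: SW TX descriptor pool
 * @pool_id: pool to de-initialize
 * @spcl_tx_desc: true for a special descriptor pool
 *
 * Tear down the TSO num-seg, TSO, TX extension and TCL descriptor pools
 * associated with the given pool_id.
 */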
void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
                               struct dp_tx_desc_pool_s *tx_desc_pool,
                               uint8_t pool_id, bool spcl_tx_desc)
{
        dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
        dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
        dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
        dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}
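
/**
 * dp_tx_compute_tx_delay_rh() - Compute HW TX completion delay
 * @soc: Handle to DP SoC structure
 * @vdev: vdev the frame was transmitted on
 * @ts: TX completion status
 * @delay_us: computed delay in microseconds
 *
 * Currently a stub on this target; no HW delay is computed.
 *
 * Return: QDF_STATUS_SUCCESS
 */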
QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
                                     struct dp_vdev *vdev,
                                     struct hal_tx_completion_status *ts,
                                     uint32_t *delay_us)
{
        return QDF_STATUS_SUCCESS;
}
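
/**
 * dp_tx_desc_pool_alloc_rh() - Allocate TX descriptor pool memory
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to allocate
 *
 * Allocate memory for the TCL, TX extension, TSO and TSO num-seg
 * descriptor pools for the given pool_id.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */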
QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
                                    uint8_t pool_id)
{
        QDF_STATUS status;

        status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to allocate tcl desc pool %d", pool_id);
                goto err_tcl_desc_pool;
        }

        status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to allocate tx ext desc pool %d", pool_id);
                goto err_free_tcl_pool;
        }

        status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to allocate tso desc pool %d", pool_id);
                goto err_free_tx_ext_pool;
        }

        status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
        if (QDF_IS_STATUS_ERROR(status)) {
                dp_err("failed to allocate tso num seg pool %d", pool_id);
                goto err_free_tso_pool;
        }

        return status;

err_free_tso_pool:
        dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
        dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
        dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
        /* TODO: is assert needed ? */
        qdf_assert_always(0);
        return status;
}
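
/**
 * dp_tx_desc_pool_free_rh() - Free TX descriptor pool memory
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 * Free the TSO num-seg, TSO, TX extension and TCL descriptor pools
 * allocated for the given pool_id.
 */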
void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
        dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
        dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
        dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
        dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}
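
/**
 * dp_tx_compl_handler_rh() - TX completion handler for HTT completions
 * @soc: Handle to DP SoC structure
 * @htt_msg: HTT soft UMAC TX completion indication message
 *
 * Parse each MSDU info entry in the completion indication, look up the
 * SW TX descriptor from the sw_cookie, record the release source, peer id
 * and release reason, and hand the resulting descriptor list to
 * dp_tx_comp_process_desc_list().
 */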
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_s *head_desc = NULL;
        struct dp_tx_desc_s *tail_desc = NULL;
        uint32_t sw_cookie;
        uint32_t num_msdus;
        uint32_t *msg_word;
        uint8_t ring_id;
        uint8_t tx_status;
        int i;

        DP_HIST_INIT();

        msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
        num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
        msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;

        for (i = 0; i < num_msdus; i++) {
                sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));

                tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
                if (!tx_desc) {
                        dp_err("failed to find tx desc");
                        qdf_assert_always(0);
                }

                /*
                 * If the descriptor is already freed in vdev_detach,
                 * continue to next descriptor
                 */
                if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
                                 !tx_desc->flags)) {
                        dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
                                           tx_desc->id);
                        DP_STATS_INC(soc, tx.tx_comp_exception, 1);
                        dp_tx_desc_check_corruption(tx_desc);
                        goto next_msdu;
                }

                if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
                        dp_tx_comp_info_rl("pdev in down state %d",
                                           tx_desc->id);
                        tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
                        dp_tx_comp_free_buf(soc, tx_desc, false);
                        dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
                        goto next_msdu;
                }

                if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
                    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
                        dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
                                         tx_desc->flags, tx_desc->id);
                        qdf_assert_always(0);
                }

                if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
                    HTT_TX_MSDU_RELEASE_SOURCE_FW)
                        tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
                else
                        tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

                tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
                tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));
                tx_desc->tx_status =
                        (tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
                         HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);

                qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

                DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

                /* First ring descriptor on the cycle */
                if (!head_desc) {
                        head_desc = tx_desc;
                        tail_desc = tx_desc;
                }

                tail_desc->next = tx_desc;
                tx_desc->next = NULL;
                tail_desc = tx_desc;

next_msdu:
                msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
        }

        /* For now, pass ring_id as 0 (zero) as WCN6450 only
         * supports one TX ring.
         */
        ring_id = 0;

        if (head_desc)
                dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

        DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
        DP_TX_HIST_STATS_PER_PDEV();
}