dp_rh_tx.c

/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	uint32_t frag0_len;
	uint32_t delta;
	uint32_t eit_hdr_len;

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	download_len -= frag0_len;

	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

	/* If EIT header length is less than the MSDU download length, then
	 * adjust the download length to just hold EIT header.
	 */
	if (eit_hdr_len < download_len) {
		delta = download_len - eit_hdr_len;
		download_len -= delta;
	}

	return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	return download_len;
}
#endif /* FEATURE_TSO */
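/*
 * TX completion parameters are conveyed through HTT completion messages on
 * this target (see dp_tx_compl_handler_rh()), so there is nothing to pull
 * out of a HAL completion descriptor here; this hook is left as a stub.
 */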
void dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
}
/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
	uint8_t pool_id;
	struct dp_tx_desc_s *tx_desc;

	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	tx_desc = dp_tx_desc_find(soc, pool_id,
				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
						DP_TX_DESC_ID_PAGE_OS,
				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS, false);
	/* pool id is not matching. Error */
	if (tx_desc && tx_desc->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, tx_desc->pool_id);
		qdf_assert_always(0);
	}

	return tx_desc;
}
void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
}
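/**
 * dp_tx_adjust_download_len_rh() - Adjust the packet download length
 * @nbuf: socket buffer
 * @download_len: requested download length (TCL_DATA_CMD + payload)
 *
 * Clamp the payload portion of the download length to the actual nbuf
 * length so that short frames are not over-fetched.
 *
 * Return: adjusted download length
 */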
static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
	uint32_t frag0_len; /* TCL_DATA_CMD */
	uint32_t frag1_len; /* 64 byte payload */

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	frag1_len = download_len - frag0_len;

	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
		frag1_len = qdf_nbuf_len(nbuf);

	return frag0_len + frag1_len;
}
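/**
 * dp_tx_fill_nbuf_data_attr_rh() - Fill CE data attributes for a TX frame
 * @nbuf: socket buffer
 *
 * Set the tx_classify bit and the packet offset (length of the TCL_DATA_CMD
 * fragment) in the nbuf data attributes consumed by the CE source descriptor.
 */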
static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
	uint32_t pkt_offset;
	uint32_t tx_classify;
	uint32_t data_attr;

	/* Enable tx_classify bit in CE SRC DESC for all data packets */
	tx_classify = 1;
	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
	data_attr |= pkt_offset << CE_DESC_PKT_OFFSET_BIT_S;

	qdf_nbuf_data_attr_set(nbuf, data_attr);
}
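/**
 * dp_tx_record_hw_desc_rh() - Record the cached TCL descriptor in the
 *			       HW descriptor history
 * @hal_tx_desc_cached: CPU-cached copy of the TCL descriptor
 * @soc: Handle to DP SoC structure
 *
 * No-op when DP_TX_HW_DESC_HISTORY is disabled or the history buffer is
 * not allocated.
 */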
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif
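/**
 * dp_tx_update_write_index() - Update the CE TX ring write index
 * @soc: Handle to DP SoC structure
 * @tx_ep_info: TX endpoint info of the RH pdev
 * @coalesce: coalescing hint passed through to the CE write index update
 *
 * When runtime PM or power save is enabled, the write index is only pushed
 * to HW while the bus is awake; otherwise a ring flush event is recorded so
 * the index is written once the link resumes.
 */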
#if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	int ret;

	/* Avoid runtime get and put APIs under high throughput scenarios */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
		return;
	}

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (QDF_IS_STATUS_SUCCESS(ret)) {
		if (hif_system_pm_state_check(soc->hif_handle)) {
			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
					  CE_RING_FLUSH_EVENT);
			ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		} else {
			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
							    coalesce);
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	} else {
		dp_runtime_get(soc);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		dp_runtime_put(soc);
	}
}
#elif defined(DP_POWER_SAVE)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
	} else {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
	}
}
#else
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
					    coalesce);
}
#endif
/**
 * dp_flush_tx_ring_rh() - flush tx ring write index
 * @pdev: dp pdev handle
 * @ring_id: Tx ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	int ret;

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		return ret;
	}

	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}
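/**
 * dp_tx_hw_enqueue_rh() - Enqueue a TX frame to the CE TX ring
 * @soc: Handle to DP SoC structure
 * @vdev: DP vdev handle
 * @tx_desc: SW TX descriptor of the frame
 * @fw_metadata: metadata to set in the TCL command descriptor
 * @tx_exc_metadata: exception path metadata (may be NULL)
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 *
 * Populate the TCL_DATA_CMD descriptor, sync it into the coherent TCL
 * command buffer pushed as fragment 0 of the nbuf, and enqueue the nbuf
 * to the CE TX ring with an adjusted download length.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */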
QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	uint32_t download_len = tx_ep_info->download_len;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t tid = msdu_info->tid;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	int ret;

	/*
	 * Initialize the cached descriptor statically here to avoid
	 * a jump to memset via a qdf_mem_set call
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, 0, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
				(char *)tx_desc->tcl_cmd_vaddr,
				tx_desc->tcl_cmd_paddr);

	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_NBUF_CB_PADDR(nbuf) =
			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
								download_len);
	}

	dp_tx_fill_nbuf_data_attr_rh(nbuf);

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
			      tx_ep_info->tx_endpoint, download_len);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		dp_verbose_debug("CE tx ring full");
		/* TODO: Should this be a separate ce_ring_full stat? */
		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
		DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail, 1);
		goto enqueue_fail;
	}

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, 0);

	dp_tx_update_write_index(soc, tx_ep_info, coalesce);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
	DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
			 tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[0], 1);
	dp_tx_update_stats(soc, tx_desc, 0);
	status = QDF_STATUS_SUCCESS;

	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

enqueue_fail:
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}
/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *				    based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
			     uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return QDF_STATUS_E_INVAL;

	/* Allocate tcl descriptors in coherent memory */
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TCL_DESC_TYPE,
				      &tcl_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!tcl_desc_pool->desc_pages.num_pages) {
		dp_err("failed to allocate tcl desc Pages");
		status = QDF_STATUS_E_NOMEM;
		goto err_alloc_fail;
	}

	return status;

err_alloc_fail:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
	return status;
}
/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
}
/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *				   based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
			    uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct qdf_mem_dma_page_t *page_info;
	QDF_STATUS status;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
	tcl_desc_pool->elem_count = num_elem;

	/* Link tcl descriptors into a freelist */
	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
				    tcl_desc_pool->elem_size,
				    tcl_desc_pool->elem_count,
				    false)) {
		dp_err("failed to link tcl desc Pages");
		status = QDF_STATUS_E_FAULT;
		goto err_link_fail;
	}

	page_info = tcl_desc_pool->desc_pages.dma_pages;
	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

	return QDF_STATUS_SUCCESS;

err_link_fail:
	return status;
}
/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *				     based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
}
/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
				    struct dp_tx_desc_s *tx_desc,
				    uint32_t index)
{
	struct qdf_mem_dma_page_t *dma_page;
	uint32_t page_id;
	uint32_t offset;

	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

	if (tcl_desc_pool->freelist)
		tcl_desc_pool->freelist =
			*((uint32_t **)tcl_desc_pool->freelist);

	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];

	tx_desc->tcl_cmd_paddr =
		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}
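/**
 * dp_tx_desc_pool_init_rh() - Initialize the SW TX descriptor pool and the
 *			       associated TCL/ext/TSO pools for a pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 * @spcl_tx_desc: unused on this target
 *
 * Assign a descriptor id to every element in the SW TX descriptor freelist
 * and attach a coherent TCL command buffer to each of them.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */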
QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tcl desc pool %d", pool_id);
		goto err_out;
	}

	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tx ext desc pool %d", pool_id);
		goto err_deinit_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso desc pool %d", pool_id);
		goto err_deinit_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso num seg pool %d", pool_id);
		goto err_deinit_tso_pool;
	}

	tx_desc_pool = &soc->tx_desc[pool_id];
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}
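/**
 * dp_tx_desc_pool_deinit_rh() - De-initialize the TCL/ext/TSO pools
 *				 associated with a SW TX descriptor pool
 * @soc: Handle to DP SoC structure
 * @tx_desc_pool: SW TX descriptor pool being de-initialized
 * @pool_id: pool to de-initialize
 * @spcl_tx_desc: unused on this target
 */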
void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}
QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
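/**
 * dp_tx_desc_pool_alloc_rh() - Allocate the TCL/ext/TSO descriptor pools
 *				for a pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */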
QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tcl desc pool %d", pool_id);
		goto err_tcl_desc_pool;
	}

	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tx ext desc pool %d", pool_id);
		goto err_free_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso desc pool %d", pool_id);
		goto err_free_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso num seg pool %d", pool_id);
		goto err_free_tso_pool;
	}

	return status;

err_free_tso_pool:
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}
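/**
 * dp_tx_desc_pool_free_rh() - Free the TCL/ext/TSO descriptor pools
 *			       for a pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 */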
void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}
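/**
 * dp_tx_compl_handler_rh() - TX completion handler for HTT soft UMAC
 *			      completion indications
 * @soc: Handle to DP SoC structure
 * @htt_msg: HTT message carrying the TX completion indication
 *
 * Walk the per-MSDU completion records in the message, look up each SW TX
 * descriptor from its sw_cookie, chain the valid ones and hand the list to
 * dp_tx_comp_process_desc_list().
 */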
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t sw_cookie;
	uint32_t num_msdus;
	uint32_t *msg_word;
	uint8_t ring_id;
	uint8_t tx_status;
	int i;

	DP_HIST_INIT();

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;

	for (i = 0; i < num_msdus; i++) {
		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));

		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
		if (!tx_desc) {
			dp_err("failed to find tx desc");
			qdf_assert_always(0);
		}

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
					   tx_desc->id);
			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
			dp_tx_desc_check_corruption(tx_desc);
			goto next_msdu;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			dp_tx_comp_info_rl("pdev in down state %d",
					   tx_desc->id);
			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
			dp_tx_comp_free_buf(soc, tx_desc, false);
			dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
			goto next_msdu;
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
					 tx_desc->flags, tx_desc->id);
			qdf_assert_always(0);
		}

		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		else
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));
		tx_desc->tx_status =
			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);
		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

		/* First ring descriptor on the cycle */
		if (!head_desc) {
			head_desc = tx_desc;
			tail_desc = tx_desc;
		}

		tail_desc->next = tx_desc;
		tx_desc->next = NULL;
		tail_desc = tx_desc;
next_msdu:
		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
	}

	/* For now, pass ring_id as 0 (zero) as WCN6450 only
	 * supports one TX ring.
	 */
	ring_id = 0;

	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
	DP_TX_HIST_STATS_PER_PDEV();
}