dp_rh_tx.c

/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	uint32_t frag0_len;
	uint32_t delta;
	uint32_t eit_hdr_len;

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	download_len -= frag0_len;

	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

	/* If the EIT header length is less than the MSDU download length,
	 * then adjust the download length to just hold the EIT header.
	 */
	if (eit_hdr_len < download_len) {
		delta = download_len - eit_hdr_len;
		download_len -= delta;
	}

	return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	return download_len;
}
#endif /* FEATURE_TSO */

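/**
 * dp_tx_comp_get_params_from_hal_desc_rh() - Get TX completion parameters
 * from a HAL completion descriptor. No-op on this target, where TX
 * completions are processed from HTT messages (see dp_tx_compl_handler_rh()).
 * @soc: Handle to DP SoC structure
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: Double pointer to return the SW TX descriptor
 */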
void dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
}

/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
	uint8_t pool_id;
	struct dp_tx_desc_s *tx_desc;

	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	tx_desc = dp_tx_desc_find(soc, pool_id,
				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
						DP_TX_DESC_ID_PAGE_OS,
				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS);

	/* pool id is not matching. Error */
	if (tx_desc && tx_desc->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, tx_desc->pool_id);
		qdf_assert_always(0);
	}

	return tx_desc;
}

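/**
 * dp_tx_process_htt_completion_rh() - Process an HTT TX completion for a
 * single descriptor; currently a no-op stub on this target
 * @soc: Handle to DP SoC structure
 * @tx_desc: SW TX descriptor
 * @status: TX completion status from the HTT descriptor
 * @ring_id: TX completion ring id
 */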
void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
}

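/**
 * dp_tx_adjust_download_len_rh() - Adjust the packet download length so it
 * covers the TCL_DATA_CMD fragment plus at most the full packet payload
 * @nbuf: socket buffer
 * @download_len: Configured packet download length
 *
 * Return: uint32_t (adjusted packet download length)
 */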
static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
	uint32_t frag0_len; /* TCL_DATA_CMD */
	uint32_t frag1_len; /* 64 byte payload */

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	frag1_len = download_len - frag0_len;

	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
		frag1_len = qdf_nbuf_len(nbuf);

	return frag0_len + frag1_len;
}

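/**
 * dp_tx_fill_nbuf_data_attr_rh() - Fill the CE data attributes (tx_classify
 * bit and packet offset) into the nbuf so the CE layer programs them in the
 * source descriptor
 * @nbuf: socket buffer
 */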
static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
	uint32_t pkt_offset;
	uint32_t tx_classify;
	uint32_t data_attr;

	/* Enable tx_classify bit in CE SRC DESC for all data packets */
	tx_classify = 1;
	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
	data_attr |= pkt_offset << CE_DESC_PKT_OFFSET_BIT_S;

	qdf_nbuf_data_attr_set(nbuf, data_attr);
}

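/**
 * dp_tx_record_hw_desc_rh() - Record the programmed TCL descriptor in the
 * TX HW descriptor history (no-op when DP_TX_HW_DESC_HISTORY is disabled)
 * @hal_tx_desc_cached: CPU-cached copy of the TCL descriptor
 * @soc: Handle to DP SoC structure
 */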
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif

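/**
 * dp_tx_update_write_index() - Update the CE TX ring write index after a
 * descriptor has been enqueued, honoring runtime PM / system PM state
 * @soc: Handle to DP SoC structure
 * @tx_ep_info: TX endpoint info of the CE used for transmit
 * @coalesce: coalesce hint returned by dp_tx_attempt_coalescing()
 */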
#if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	int ret;

	/* Avoid runtime get and put APIs under high throughput scenarios */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
		return;
	}

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (QDF_IS_STATUS_SUCCESS(ret)) {
		if (hif_system_pm_state_check(soc->hif_handle)) {
			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
					  CE_RING_FLUSH_EVENT);
			ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		} else {
			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
							    coalesce);
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	} else {
		dp_runtime_get(soc);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		dp_runtime_put(soc);
	}
}
#elif defined(DP_POWER_SAVE)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
	} else {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    coalesce);
	}
}
#else
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info,
				     int coalesce)
{
	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
					    coalesce);
}
#endif

/**
 * dp_flush_tx_ring_rh() - Flush the TX ring write index
 * @pdev: dp pdev handle
 * @ring_id: Tx ring id
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	int ret;

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		return ret;
	}

	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);

	return ret;
}

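/**
 * dp_tx_hw_enqueue_rh() - Fill the TCL descriptor for an MSDU and enqueue
 * it to the CE TX ring
 * @soc: Handle to DP SoC structure
 * @vdev: DP vdev handle
 * @tx_desc: SW TX descriptor of the frame
 * @fw_metadata: Metadata to send to the firmware
 * @tx_exc_metadata: Exception-path metadata (may be NULL)
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_RESOURCES on failure
 */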
QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	uint32_t download_len = tx_ep_info->download_len;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t tid = msdu_info->tid;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	int ret;

	/*
	 * Initialize the descriptor statically here to avoid
	 * a function call to qdf_mem_set()/memset
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };

	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, 0, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
				(char *)tx_desc->tcl_cmd_vaddr,
				tx_desc->tcl_cmd_paddr);

	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_NBUF_CB_PADDR(nbuf) =
			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
								download_len);
	}

	dp_tx_fill_nbuf_data_attr_rh(nbuf);

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
			      tx_ep_info->tx_endpoint, download_len);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		dp_verbose_debug("CE tx ring full");
		/* TODO: Should this be a separate ce_ring_full stat? */
		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto enqueue_fail;
	}

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, 0);

	dp_tx_update_write_index(soc, tx_ep_info, coalesce);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[0], 1);
	dp_tx_update_stats(soc, tx_desc, 0);
	status = QDF_STATUS_SUCCESS;

	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

enqueue_fail:
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *				    based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
			     uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return QDF_STATUS_E_INVAL;

	/* Allocate tcl descriptors in coherent memory */
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TCL_DESC_TYPE,
				      &tcl_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!tcl_desc_pool->desc_pages.num_pages) {
		dp_err("failed to allocate tcl desc Pages");
		status = QDF_STATUS_E_NOMEM;
		goto err_alloc_fail;
	}

	return status;

err_alloc_fail:
	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
}

/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *				   based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
			    uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct qdf_mem_dma_page_t *page_info;
	QDF_STATUS status;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
	tcl_desc_pool->elem_count = num_elem;

	/* Link tcl descriptors into a freelist */
	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
				    tcl_desc_pool->elem_size,
				    tcl_desc_pool->elem_count,
				    false)) {
		dp_err("failed to link tcl desc Pages");
		status = QDF_STATUS_E_FAULT;
		goto err_link_fail;
	}

	page_info = tcl_desc_pool->desc_pages.dma_pages;
	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

	return QDF_STATUS_SUCCESS;

err_link_fail:
	return status;
}

/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *				     based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
}

/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
				    struct dp_tx_desc_s *tx_desc,
				    uint32_t index)
{
	struct qdf_mem_dma_page_t *dma_page;
	uint32_t page_id;
	uint32_t offset;

	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

	if (tcl_desc_pool->freelist)
		tcl_desc_pool->freelist =
			*((uint32_t **)tcl_desc_pool->freelist);

	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];

	tx_desc->tcl_cmd_paddr =
		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}

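/**
 * dp_tx_desc_pool_init_rh() - Initialize the tcl, extension, TSO and TSO
 * num-seg pools for a pool_id and attach a tcl command descriptor to every
 * SW TX descriptor in the pool's freelist
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 * @spcl_tx_desc: special TX descriptor pool flag (unused here)
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */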
QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tcl desc pool %d", pool_id);
		goto err_out;
	}

	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tx ext desc pool %d", pool_id);
		goto err_deinit_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso desc pool %d", pool_id);
		goto err_deinit_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso num seg pool %d", pool_id);
		goto err_deinit_tso_pool;
	}

	tx_desc_pool = &soc->tx_desc[pool_id];
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
		      (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

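/**
 * dp_tx_desc_pool_deinit_rh() - De-initialize the pools set up by
 * dp_tx_desc_pool_init_rh() for a pool_id
 * @soc: Handle to DP SoC structure
 * @tx_desc_pool: SW TX descriptor pool (unused here)
 * @pool_id: pool to de-initialize
 * @spcl_tx_desc: special TX descriptor pool flag (unused here)
 */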
void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}

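/**
 * dp_tx_compute_tx_delay_rh() - Compute the per-frame TX delay from the
 * completion status; a stub on this target that does not fill in @delay_us
 * @soc: Handle to DP SoC structure
 * @vdev: DP vdev handle
 * @ts: TX completion status
 * @delay_us: delay in microseconds
 *
 * Return: QDF_STATUS_SUCCESS
 */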
QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}

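/**
 * dp_tx_desc_pool_alloc_rh() - Allocate the tcl, extension, TSO and TSO
 * num-seg descriptor pools for a pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on failure
 */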
QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tcl desc pool %d", pool_id);
		goto err_tcl_desc_pool;
	}

	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tx ext desc pool %d", pool_id);
		goto err_free_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso desc pool %d", pool_id);
		goto err_free_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso num seg pool %d", pool_id);
		goto err_free_tso_pool;
	}

	return status;

err_free_tso_pool:
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

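/**
 * dp_tx_desc_pool_free_rh() - Free the pools allocated by
 * dp_tx_desc_pool_alloc_rh() for a pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 */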
void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}

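/**
 * dp_tx_compl_handler_rh() - Parse an HTT soft-UMAC TX completion indication,
 * look up the SW TX descriptors by sw_cookie and hand the resulting list to
 * the common completion processing
 * @soc: Handle to DP SoC structure
 * @htt_msg: HTT TX completion message buffer
 */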
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t sw_cookie;
	uint32_t num_msdus;
	uint32_t *msg_word;
	uint8_t ring_id;
	uint8_t tx_status;
	int i;

	DP_HIST_INIT();

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;

	for (i = 0; i < num_msdus; i++) {
		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));

		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
		if (!tx_desc) {
			dp_err("failed to find tx desc");
			qdf_assert_always(0);
		}

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
					   tx_desc->id);
			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
			dp_tx_desc_check_corruption(tx_desc);
			goto next_msdu;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			dp_tx_comp_info_rl("pdev in down state %d",
					   tx_desc->id);
			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
			dp_tx_comp_free_buf(soc, tx_desc, false);
			dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
			goto next_msdu;
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
					 tx_desc->flags, tx_desc->id);
			qdf_assert_always(0);
		}

		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		else
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));
		tx_desc->tx_status =
			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);
		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

		/* First ring descriptor on the cycle */
		if (!head_desc) {
			head_desc = tx_desc;
			tail_desc = tx_desc;
		}

		tail_desc->next = tx_desc;
		tx_desc->next = NULL;
		tail_desc = tx_desc;

next_msdu:
		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
	}

	/* For now, pass ring_id as 0 (zero) as WCN6450 only
	 * supports one TX ring.
	 */
	ring_id = 0;
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
	DP_TX_HIST_STATS_PER_PDEV();
}