/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	uint32_t frag0_len;
	uint32_t delta;
	uint32_t eit_hdr_len;

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	download_len -= frag0_len;

	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

	/* If the EIT header length is less than the MSDU download length,
	 * shrink the download length so that only the EIT header is
	 * downloaded.
	 */
	if (eit_hdr_len < download_len) {
		delta = download_len - eit_hdr_len;
		download_len -= delta;
	}
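
	/* Worked example (illustrative values only; actual sizes depend on
	 * DP_RH_TX_TCL_DESC_SIZE and the TSO segment): with a requested
	 * download_len of 256 bytes and a 32-byte TCL descriptor in frag 0,
	 * the payload budget above becomes 224 bytes. If the EIT header of
	 * the current TSO segment is 54 bytes, the value returned below is
	 * 54, i.e. only the EIT header is downloaded for this segment.
	 */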
	return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	return download_len;
}
#endif /* FEATURE_TSO */
void dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
}

/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
	uint8_t pool_id;
	struct dp_tx_desc_s *tx_desc;
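
	/* The sw_cookie carried in the completion message is the descriptor
	 * id assigned in dp_tx_desc_pool_init_rh(): it packs the pool id,
	 * the page id within that pool and the offset within the page using
	 * the DP_TX_DESC_ID_*_MASK/_OS definitions. Unpack the same fields
	 * here to walk back to the software descriptor.
	 */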
	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	tx_desc = dp_tx_desc_find(soc, pool_id,
				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
						DP_TX_DESC_ID_PAGE_OS,
				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS);

	/* A pool id mismatch indicates a corrupted cookie or descriptor */
	if (tx_desc && tx_desc->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, tx_desc->pool_id);
		qdf_assert_always(0);
	}

	return tx_desc;
}

void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
}
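
/**
 * dp_tx_adjust_download_len_rh() - Clamp the packet download length
 * @nbuf: socket buffer; frag 0 holds the TCL_DATA_CMD descriptor
 * @download_len: requested download length
 *
 * The download is split into frag 0 (the TCL descriptor) and a payload
 * portion. If the packet is shorter than the payload portion implied by
 * @download_len, only the actual packet length is counted for the payload.
 *
 * Return: frag 0 length plus the (possibly clamped) payload length
 */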
static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
	uint32_t frag0_len; /* TCL_DATA_CMD */
	uint32_t frag1_len; /* 64 byte payload */

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	frag1_len = download_len - frag0_len;

	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
		frag1_len = qdf_nbuf_len(nbuf);

	return frag0_len + frag1_len;
}
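
/**
 * dp_tx_fill_nbuf_data_attr_rh() - Fill the CE source descriptor data
 *	attributes for a data packet
 * @nbuf: socket buffer being transmitted
 *
 * Packs the tx_classify bit and the packet offset (length of frag 0, i.e.
 * the TCL descriptor) into the nbuf data attributes, which the CE layer
 * carries into its source descriptor.
 */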
static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
	uint32_t pkt_offset;
	uint32_t tx_classify;
	uint32_t data_attr;

	/* Enable tx_classify bit in CE SRC DESC for all data packets */
	tx_classify = 1;
	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
	data_attr |= pkt_offset << CE_DESC_PKT_OFFSET_BIT_S;

	qdf_nbuf_data_attr_set(nbuf, data_attr);
}
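
/**
 * dp_tx_record_hw_desc_rh() - Record the cached TCL descriptor in the
 *	TX HW descriptor history
 * @hal_tx_desc_cached: CPU-cached copy of the TCL descriptor
 * @soc: Handle to DP SoC structure
 *
 * Debug aid: copies the descriptor bytes and a timestamp into the next
 * slot of soc->tx_hw_desc_history so recently posted descriptors can be
 * inspected later.
 */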
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif
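
/**
 * dp_tx_update_write_index() - Update the CE TX ring write index
 * @soc: Handle to DP SoC structure
 * @tx_ep_info: TX endpoint info holding the CE TX handle
 *
 * With runtime PM enabled, the index is committed to the hardware register
 * only when a runtime-PM reference can be taken and no system suspend is in
 * progress; otherwise only the shadow index is updated and a CE ring flush
 * event is set so the register write is deferred until the device is
 * accessible again. Under the high-throughput policy the runtime-PM get/put
 * calls are skipped and the register is written directly.
 */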
#if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info)
{
	int ret;

	/* Avoid runtime get and put APIs under high throughput scenarios */
	if (dp_get_rtpm_tput_policy_requirement(soc)) {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    true);
		return;
	}

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (QDF_IS_STATUS_SUCCESS(ret)) {
		if (hif_system_pm_state_check(soc->hif_handle)) {
			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
					  CE_RING_FLUSH_EVENT);
		} else {
			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
							    true);
		}
	} else {
		dp_runtime_get(soc);
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    false);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		qdf_atomic_inc(&soc->tx_pending_rtpm);
		dp_runtime_put(soc);
	}
}
#elif defined(DP_POWER_SAVE)
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info)
{
	if (hif_system_pm_state_check(soc->hif_handle)) {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    false);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
	} else {
		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
						    true);
	}
}
#else
static void dp_tx_update_write_index(struct dp_soc *soc,
				     struct dp_tx_ep_info_rh *tx_ep_info)
{
	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
					    true);
}
#endif
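
/**
 * dp_tx_hw_enqueue_rh() - Enqueue a TX descriptor to the hardware (CE ring)
 * @soc: Handle to DP SoC structure
 * @vdev: vdev the frame is being transmitted on
 * @tx_desc: software TX descriptor for the frame
 * @fw_metadata: metadata to pass to firmware in the TCL descriptor
 * @tx_exc_metadata: exception-path metadata (may be NULL)
 * @msdu_info: MSDU info, used here for TID and TSO segment details
 *
 * Builds a cached TCL_DATA_CMD descriptor, syncs it into the per-descriptor
 * coherent TCL command buffer, pushes that buffer as frag 0 of the nbuf,
 * computes the download length and enqueues the nbuf on the CE TX ring.
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF_STATUS_E_RESOURCES on failure
 */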
QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	uint32_t download_len = tx_ep_info->download_len;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t tid = msdu_info->tid;
	uint32_t *hal_tx_desc_cached;
	int ret;
	/*
	 * Use static initialization here instead of qdf_mem_set() to avoid
	 * the extra function-call jump for zeroing the descriptor.
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}
	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, 0, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));
	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* Verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
				(char *)tx_desc->tcl_cmd_vaddr,
				tx_desc->tcl_cmd_paddr);

	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_NBUF_CB_PADDR(nbuf) =
			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
								download_len);
	}

	dp_tx_fill_nbuf_data_attr_rh(nbuf);

	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
			      tx_ep_info->tx_endpoint, download_len);
	if (ret) {
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		dp_verbose_debug("CE tx ring full");
		/* TODO: Should this be a separate ce_ring_full stat? */
		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto enqueue_fail;
	}

	dp_tx_update_write_index(soc, tx_ep_info);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	status = QDF_STATUS_SUCCESS;

	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

enqueue_fail:
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *	based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
			     uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return QDF_STATUS_E_INVAL;

	/* Allocate tcl descriptors in coherent memory */
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_TCL_DESC_TYPE,
				      &tcl_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!tcl_desc_pool->desc_pages.num_pages) {
		dp_err("failed to allocate tcl desc Pages");
		status = QDF_STATUS_E_NOMEM;
		goto err_alloc_fail;
	}

	return status;

err_alloc_fail:
	dp_desc_multi_pages_mem_free(soc, DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
}

/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *	based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
			    uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct qdf_mem_dma_page_t *page_info;
	QDF_STATUS status;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
	tcl_desc_pool->elem_count = num_elem;

	/* Link tcl descriptors into a freelist */
	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
				    tcl_desc_pool->elem_size,
				    tcl_desc_pool->elem_count,
				    false)) {
		dp_err("failed to link tcl desc Pages");
		status = QDF_STATUS_E_FAULT;
		goto err_link_fail;
	}

	page_info = tcl_desc_pool->desc_pages.dma_pages;
	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

	return QDF_STATUS_SUCCESS;

err_link_fail:
	return status;
}

/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *	based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
}

/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
				    struct dp_tx_desc_s *tx_desc,
				    uint32_t index)
{
	struct qdf_mem_dma_page_t *dma_page;
	uint32_t page_id;
	uint32_t offset;
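
	/* The freelist is embedded in the coherent pages themselves: the
	 * first word of each free element holds the virtual address of the
	 * next free element (set up by qdf_mem_multi_page_link() in
	 * dp_tx_tcl_desc_pool_init_rh()). Pop the head of the freelist for
	 * the virtual address; the physical address is computed from @index
	 * (page id and offset within the page), which matches the pop order
	 * during pool init.
	 */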
	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

	if (tcl_desc_pool->freelist)
		tcl_desc_pool->freelist =
			*((uint32_t **)tcl_desc_pool->freelist);

	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];
	tx_desc->tcl_cmd_paddr =
		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}

QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tcl desc pool %d", pool_id);
		goto err_out;
	}

	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tx ext desc pool %d", pool_id);
		goto err_deinit_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso desc pool %d", pool_id);
		goto err_deinit_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso num seg pool %d", pool_id);
		goto err_deinit_tso_pool;
	}

	tx_desc_pool = &soc->tx_desc[pool_id];
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
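
	/* Walk the software descriptor freelist and assign each descriptor
	 * an id that encodes its pool id, page id and offset within the page
	 * (DP_TX_DESC_ID_*_OS shifts). The TX completion path unpacks this
	 * id from the sw_cookie in dp_tx_comp_find_tx_desc_rh(). Each
	 * descriptor is also paired with a coherent TCL command buffer.
	 */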
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
		      (page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
	/* TODO: is assert needed? */
	qdf_assert_always(0);
	return status;
}

void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}

QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tcl desc pool %d\n", pool_id);
		goto err_tcl_desc_pool;
	}

	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tx ext desc pool %d\n", pool_id);
		goto err_free_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso desc pool %d\n", pool_id);
		goto err_free_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso num seg pool %d\n", pool_id);
		goto err_free_tso_pool;
	}

	return status;

err_free_tso_pool:
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
	/* TODO: is assert needed? */
	qdf_assert_always(0);
	return status;
}

void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}
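
/**
 * dp_tx_compl_handler_rh() - TX completion handler for the HTT soft-UMAC
 *	completion indication
 * @soc: Handle to DP SoC structure
 * @htt_msg: network buffer holding the HTT TX completion indication
 *
 * Walks the per-MSDU records in the completion message, maps each
 * sw_buffer_cookie back to its software TX descriptor, fills in the release
 * source, peer id and TQM-style release reason, and hands the collected
 * descriptor list to dp_tx_comp_process_desc_list().
 */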
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t sw_cookie;
	uint32_t num_msdus;
	uint32_t *msg_word;
	uint8_t ring_id;
	uint8_t tx_status;
	int i;

	DP_HIST_INIT();

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;
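
	/* The message starts with a fixed header (consumed above); after it
	 * come num_msdus fixed-size per-MSDU info records, each
	 * HTT_TX_MSDU_INFO_SIZE bytes long. msg_word is advanced by one
	 * record per loop iteration (sizes are in bytes, hence the >> 2
	 * when stepping a uint32_t pointer).
	 */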
	for (i = 0; i < num_msdus; i++) {
		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));
		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
		if (!tx_desc) {
			dp_err("failed to find tx desc");
			qdf_assert_always(0);
		}

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
					   tx_desc->id);
			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
			dp_tx_desc_check_corruption(tx_desc);
			goto next_msdu;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			dp_tx_comp_info_rl("pdev in down state %d",
					   tx_desc->id);
			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
			dp_tx_comp_free_buf(soc, tx_desc, false);
			dp_tx_desc_release(tx_desc, tx_desc->pool_id);
			goto next_msdu;
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			dp_tx_comp_alert("Txdesc invalid, flgs = %x, id = %d",
					 tx_desc->flags, tx_desc->id);
			qdf_assert_always(0);
		}

		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		else
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));
		tx_desc->tx_status =
			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);

		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

		/* First descriptor in this completion batch */
		if (!head_desc) {
			head_desc = tx_desc;
			tail_desc = tx_desc;
		}

		tail_desc->next = tx_desc;
		tx_desc->next = NULL;
		tail_desc = tx_desc;

next_msdu:
		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
	}

	/* For now, pass ring_id as 0 (zero) since WCN6450 supports only one
	 * TX ring.
	 */
	ring_id = 0;

	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
	DP_TX_HIST_STATS_PER_PDEV();
}