dp_rh_tx.c
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_rh_tx.h"
#include "dp_tx_desc.h"
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_rh_api.h>
#include <hal_rh_tx.h>
#include "dp_peer.h"
#include "dp_rh.h"
#include <ce_api.h>
#include <ce_internal.h>
#include "dp_rh_htt.h"

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#if defined(FEATURE_TSO)
/**
 * dp_tx_adjust_tso_download_len_rh() - Adjust download length for TSO packet
 * @nbuf: socket buffer
 * @msdu_info: handle to struct dp_tx_msdu_info_s
 * @download_len: Packet download length that needs adjustment
 *
 * Return: uint32_t (Adjusted packet download length)
 */
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	uint32_t frag0_len;
	uint32_t delta;
	uint32_t eit_hdr_len;

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	download_len -= frag0_len;

	eit_hdr_len = msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].length;

	/* If EIT header length is less than the MSDU download length, then
	 * adjust the download length to just hold EIT header.
	 */
	if (eit_hdr_len < download_len) {
		delta = download_len - eit_hdr_len;
		download_len -= delta;
	}

	return download_len;
}
#else
static uint32_t
dp_tx_adjust_tso_download_len_rh(qdf_nbuf_t nbuf,
				 struct dp_tx_msdu_info_s *msdu_info,
				 uint32_t download_len)
{
	return download_len;
}
#endif /* FEATURE_TSO */

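/**
 * dp_tx_comp_get_params_from_hal_desc_rh() - Get TX desc from HAL comp desc
 * @soc: Handle to DP SoC structure
 * @tx_comp_hal_desc: HAL TX completion descriptor
 * @r_tx_desc: Double pointer to be populated with the software TX descriptor
 *
 * No-op on this architecture: TX completions are delivered through HTT
 * messages (see dp_tx_compl_handler_rh()) rather than HAL completion ring
 * descriptors, so there is nothing to extract here.
 *
 * Return: none
 */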
void dp_tx_comp_get_params_from_hal_desc_rh(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
}

/**
 * dp_tx_comp_find_tx_desc_rh() - Find software TX descriptor using sw_cookie
 *
 * @soc: Handle to DP SoC structure
 * @sw_cookie: Key to find the TX descriptor
 *
 * Return: TX descriptor handle or NULL (if not found)
 */
static struct dp_tx_desc_s *
dp_tx_comp_find_tx_desc_rh(struct dp_soc *soc, uint32_t sw_cookie)
{
	uint8_t pool_id;
	struct dp_tx_desc_s *tx_desc;

	pool_id = (sw_cookie & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

	/* Find Tx descriptor */
	tx_desc = dp_tx_desc_find(soc, pool_id,
				  (sw_cookie & DP_TX_DESC_ID_PAGE_MASK) >>
						DP_TX_DESC_ID_PAGE_OS,
				  (sw_cookie & DP_TX_DESC_ID_OFFSET_MASK) >>
						DP_TX_DESC_ID_OFFSET_OS);
	/* pool id is not matching. Error */
	if (tx_desc && tx_desc->pool_id != pool_id) {
		dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
				 pool_id, tx_desc->pool_id);
		qdf_assert_always(0);
	}

	return tx_desc;
}

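/**
 * dp_tx_process_htt_completion_rh() - Process an HTT TX completion for a desc
 * @soc: Handle to DP SoC structure
 * @tx_desc: Software TX descriptor
 * @status: Pointer to the HTT completion status words
 * @ring_id: Completion ring id
 *
 * Currently a no-op; per-MSDU HTT completions for this target are handled
 * in dp_tx_compl_handler_rh() instead.
 *
 * Return: none
 */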
void dp_tx_process_htt_completion_rh(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
}

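/**
 * dp_tx_adjust_download_len_rh() - Adjust the packet download length
 * @nbuf: socket buffer
 * @download_len: Configured download length (TCL descriptor plus payload)
 *
 * Clips the payload portion of the download length to the actual nbuf
 * length so that no more than the full frame is downloaded to the target.
 *
 * Return: uint32_t (adjusted download length)
 */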
static inline uint32_t
dp_tx_adjust_download_len_rh(qdf_nbuf_t nbuf, uint32_t download_len)
{
	uint32_t frag0_len; /* TCL_DATA_CMD */
	uint32_t frag1_len; /* 64 byte payload */

	frag0_len = qdf_nbuf_get_frag_len(nbuf, 0);
	frag1_len = download_len - frag0_len;

	if (qdf_unlikely(qdf_nbuf_len(nbuf) < frag1_len))
		frag1_len = qdf_nbuf_len(nbuf);

	return frag0_len + frag1_len;
}

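/**
 * dp_tx_fill_nbuf_data_attr_rh() - Fill CE data attributes for the nbuf
 * @nbuf: socket buffer
 *
 * Encodes the tx_classify bit and the packet offset (length of the TCL
 * command fragment) into the nbuf data attributes consumed when building
 * the CE source descriptor.
 *
 * Return: none
 */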
static inline void dp_tx_fill_nbuf_data_attr_rh(qdf_nbuf_t nbuf)
{
	uint32_t pkt_offset;
	uint32_t tx_classify;
	uint32_t data_attr;

	/* Enable tx_classify bit in CE SRC DESC for all data packets */
	tx_classify = 1;
	pkt_offset = qdf_nbuf_get_frag_len(nbuf, 0);

	data_attr = tx_classify << CE_DESC_TX_CLASSIFY_BIT_S;
	data_attr |= pkt_offset << CE_DESC_PKT_OFFSET_BIT_S;

	qdf_nbuf_data_attr_set(nbuf, data_attr);
}

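/**
 * dp_tx_record_hw_desc_rh() - Record the posted HW TX descriptor in history
 * @hal_tx_desc_cached: CPU-cached copy of the HW (TCL) descriptor
 * @soc: Handle to DP SoC structure
 *
 * Copies the descriptor into the SoC TX HW descriptor history buffer for
 * debugging; compiled out to a no-op when DP_TX_HW_DESC_HISTORY is not set.
 *
 * Return: none
 */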
#ifdef DP_TX_HW_DESC_HISTORY
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
	struct dp_tx_hw_desc_history *tx_hw_desc_history =
						&soc->tx_hw_desc_history;
	struct dp_tx_hw_desc_evt *evt;
	uint32_t idx = 0;
	uint16_t slot = 0;

	if (!tx_hw_desc_history->allocated)
		return;

	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
					 &slot,
					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
					 DP_TX_HW_DESC_HIST_MAX);

	evt = &tx_hw_desc_history->entry[slot][idx];
	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
	evt->posted = qdf_get_log_timestamp();
	evt->tcl_ring_id = 0;
}
#else
static inline void
dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
{
}
#endif

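/**
 * dp_tx_hw_enqueue_rh() - Enqueue a TX frame to the hardware via the CE
 * @soc: Handle to DP SoC structure
 * @vdev: Handle to DP vdev structure
 * @tx_desc: Software TX descriptor for the frame
 * @fw_metadata: Firmware metadata to set in the TCL descriptor
 * @tx_exc_metadata: Exception path metadata, if any
 * @msdu_info: Handle to struct dp_tx_msdu_info_s
 *
 * Builds the TCL command descriptor, pushes it as the first fragment of the
 * nbuf, adjusts the download length (including the TSO case) and hands the
 * packet to the copy engine with ce_send_fast().
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */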
QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(vdev->pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	uint32_t download_len = tx_ep_info->download_len;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t tid = msdu_info->tid;
	uint32_t *hal_tx_desc_cached;
	int ret;
	/*
	 * Initialize the cached descriptor statically at declaration to
	 * avoid a separate qdf_mem_set() call.
	 */
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	enum cdp_sec_type sec_type = ((tx_exc_metadata &&
			tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
			tx_exc_metadata->sec_type : vdev->sec_type);
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, 0, tx_desc->id,
				 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (!dp_tx_desc_set_ktimestamp(vdev, tx_desc))
		dp_tx_desc_set_timestamp(tx_desc);

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length,
			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
			 tx_desc->id);

	hal_tx_desc_sync(hal_tx_desc_cached, tx_desc->tcl_cmd_vaddr);

	qdf_nbuf_frag_push_head(nbuf, DP_RH_TX_TCL_DESC_SIZE,
				(char *)tx_desc->tcl_cmd_vaddr,
				tx_desc->tcl_cmd_paddr);

	download_len = dp_tx_adjust_download_len_rh(nbuf, download_len);

	if (qdf_nbuf_is_tso(nbuf)) {
		QDF_NBUF_CB_PADDR(nbuf) =
			msdu_info->u.tso_info.curr_seg->seg.tso_frags[0].paddr;
		download_len = dp_tx_adjust_tso_download_len_rh(nbuf, msdu_info,
								download_len);
	}

	dp_tx_fill_nbuf_data_attr_rh(nbuf);

	ret = ce_send_fast(tx_ep_info->ce_tx_hdl, nbuf,
			   tx_ep_info->tx_endpoint, download_len);
	if (!ret) {
		dp_verbose_debug("CE tx ring full");
		/* TODO: Should this be a separate ce_ring_full stat? */
		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto enqueue_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	status = QDF_STATUS_SUCCESS;

	dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

enqueue_fail:
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);

	return status;
}

/**
 * dp_tx_tcl_desc_pool_alloc_rh() - Allocate the tcl descriptor pool
 *				    based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
			     uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	uint16_t elem_size = DP_RH_TX_TCL_DESC_SIZE;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return QDF_STATUS_E_INVAL;

	/* Allocate tcl descriptors in coherent memory */
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);
	dp_desc_multi_pages_mem_alloc(soc, DP_TX_TCL_DESC_TYPE,
				      &tcl_desc_pool->desc_pages,
				      elem_size, num_elem, memctx, false);

	if (!tcl_desc_pool->desc_pages.num_pages) {
		dp_err("failed to allocate tcl desc Pages");
		status = QDF_STATUS_E_NOMEM;
		goto err_alloc_fail;
	}

	return status;

err_alloc_fail:
	dp_desc_multi_pages_mem_free(soc, DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
	return status;
}

/**
 * dp_tx_tcl_desc_pool_free_rh() - Free the tcl descriptor pool
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to free
 *
 */
static void dp_tx_tcl_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	qdf_dma_context_t memctx = 0;

	if (pool_id > MAX_TXDESC_POOLS - 1)
		return;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	memctx = qdf_get_dma_mem_context(tcl_desc_pool, memctx);

	dp_desc_multi_pages_mem_free(soc, DP_TX_TCL_DESC_TYPE,
				     &tcl_desc_pool->desc_pages,
				     memctx, false);
}

/**
 * dp_tx_tcl_desc_pool_init_rh() - Initialize tcl descriptor pool
 *				   based on pool_id
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: pool to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_FAULT
 */
static QDF_STATUS
dp_tx_tcl_desc_pool_init_rh(struct dp_soc *soc, uint32_t num_elem,
			    uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct qdf_mem_dma_page_t *page_info;
	QDF_STATUS status;

	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tcl_desc_pool->elem_size = DP_RH_TX_TCL_DESC_SIZE;
	tcl_desc_pool->elem_count = num_elem;

	/* Link tcl descriptors into a freelist */
	if (qdf_mem_multi_page_link(soc->osdev, &tcl_desc_pool->desc_pages,
				    tcl_desc_pool->elem_size,
				    tcl_desc_pool->elem_count,
				    false)) {
		dp_err("failed to link tcl desc Pages");
		status = QDF_STATUS_E_FAULT;
		goto err_link_fail;
	}

	page_info = tcl_desc_pool->desc_pages.dma_pages;
	tcl_desc_pool->freelist = (uint32_t *)page_info->page_v_addr_start;

	return QDF_STATUS_SUCCESS;

err_link_fail:
	return status;
}

/**
 * dp_tx_tcl_desc_pool_deinit_rh() - De-initialize tcl descriptor pool
 *				     based on pool_id
 * @soc: Handle to DP SoC structure
 * @pool_id: pool to de-initialize
 *
 */
static void dp_tx_tcl_desc_pool_deinit_rh(struct dp_soc *soc, uint8_t pool_id)
{
}

/**
 * dp_tx_alloc_tcl_desc_rh() - Allocate a tcl descriptor from the pool
 * @tcl_desc_pool: Tcl descriptor pool
 * @tx_desc: SW TX descriptor
 * @index: Index into the tcl descriptor pool
 */
static void dp_tx_alloc_tcl_desc_rh(struct dp_tx_tcl_desc_pool_s *tcl_desc_pool,
				    struct dp_tx_desc_s *tx_desc,
				    uint32_t index)
{
	struct qdf_mem_dma_page_t *dma_page;
	uint32_t page_id;
	uint32_t offset;

	tx_desc->tcl_cmd_vaddr = (void *)tcl_desc_pool->freelist;

	if (tcl_desc_pool->freelist)
		tcl_desc_pool->freelist =
			*((uint32_t **)tcl_desc_pool->freelist);

	page_id = index / tcl_desc_pool->desc_pages.num_element_per_page;
	offset = index % tcl_desc_pool->desc_pages.num_element_per_page;
	dma_page = &tcl_desc_pool->desc_pages.dma_pages[page_id];

	tx_desc->tcl_cmd_paddr =
		dma_page->page_p_addr + offset * tcl_desc_pool->elem_size;
}

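/**
 * dp_tx_desc_pool_init_rh() - Initialize the software TX descriptor pool
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to initialize
 *
 * Initializes the tcl, tx ext, tso and tso num seg descriptor pools, then
 * walks the software TX descriptor freelist assigning descriptor ids and
 * attaching a tcl command descriptor to each entry.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */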
QDF_STATUS dp_tx_desc_pool_init_rh(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	struct dp_soc_rh *rh_soc = dp_get_rh_soc_from_dp_soc(soc);
	uint32_t id, count, page_id, offset, pool_id_32;
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_tcl_desc_pool_s *tcl_desc_pool;
	struct dp_tx_desc_pool_s *tx_desc_pool;
	uint16_t num_desc_per_page;
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_init_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tcl desc pool %d", pool_id);
		goto err_out;
	}

	status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tx ext desc pool %d", pool_id);
		goto err_deinit_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso desc pool %d", pool_id);
		goto err_deinit_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to initialise tso num seg pool %d", pool_id);
		goto err_deinit_tso_pool;
	}

	tx_desc_pool = &soc->tx_desc[pool_id];
	tcl_desc_pool = &rh_soc->tcl_desc_pool[pool_id];
	tx_desc = tx_desc_pool->freelist;
	count = 0;
	pool_id_32 = (uint32_t)pool_id;
	num_desc_per_page = tx_desc_pool->desc_pages.num_element_per_page;
	while (tx_desc) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);

		tx_desc->id = id;
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		dp_tx_alloc_tcl_desc_rh(tcl_desc_pool, tx_desc, count);
		tx_desc = tx_desc->next;
		count++;
	}

	return QDF_STATUS_SUCCESS;

err_deinit_tso_pool:
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tx_ext_pool:
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
err_deinit_tcl_pool:
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
err_out:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

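/**
 * dp_tx_desc_pool_deinit_rh() - De-initialize the software TX descriptor pool
 * @soc: Handle to DP SoC structure
 * @tx_desc_pool: Handle to the TX descriptor pool
 * @pool_id: Pool to de-initialize
 *
 * Frees the tso num seg pool and de-initializes the tso, tx ext and tcl
 * descriptor pools for @pool_id.
 *
 * Return: none
 */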
void dp_tx_desc_pool_deinit_rh(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_deinit_rh(soc, pool_id);
}

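/**
 * dp_tx_compute_tx_delay_rh() - Compute the HW TX completion delay
 * @soc: Handle to DP SoC structure
 * @vdev: vdev handle
 * @ts: TX completion status
 * @delay_us: Delay to be populated, in microseconds
 *
 * Currently a no-op that reports success without computing a delay.
 *
 * Return: QDF_STATUS_SUCCESS
 */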
QDF_STATUS dp_tx_compute_tx_delay_rh(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}

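/**
 * dp_tx_desc_pool_alloc_rh() - Allocate TX descriptor pool memory
 * @soc: Handle to DP SoC structure
 * @num_elem: Number of descriptor elements per pool
 * @pool_id: Pool to allocate
 *
 * Allocates the tcl, tx ext, tso and tso num seg descriptor pools,
 * unwinding any earlier allocations on failure.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */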
QDF_STATUS dp_tx_desc_pool_alloc_rh(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	QDF_STATUS status;

	status = dp_tx_tcl_desc_pool_alloc_rh(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tcl desc pool %d\n", pool_id);
		goto err_tcl_desc_pool;
	}

	status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tx ext desc pool %d\n", pool_id);
		goto err_free_tcl_pool;
	}

	status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso desc pool %d\n", pool_id);
		goto err_free_tx_ext_pool;
	}

	status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem, pool_id);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_err("failed to allocate tso num seg pool %d\n", pool_id);
		goto err_free_tso_pool;
	}

	return status;

err_free_tso_pool:
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
err_free_tx_ext_pool:
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
err_free_tcl_pool:
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
err_tcl_desc_pool:
	/* TODO: is assert needed ? */
	qdf_assert_always(0);
	return status;
}

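/**
 * dp_tx_desc_pool_free_rh() - Free TX descriptor pool memory
 * @soc: Handle to DP SoC structure
 * @pool_id: Pool to free
 *
 * Return: none
 */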
void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id)
{
	dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
	dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
	dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
	dp_tx_tcl_desc_pool_free_rh(soc, pool_id);
}

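/**
 * dp_tx_compl_handler_rh() - TX completion handler for HTT completion message
 * @soc: Handle to DP SoC structure
 * @htt_msg: HTT soft UMAC TX completion indication message
 *
 * Walks the per-MSDU completion records in the HTT message, looks up the
 * software TX descriptor for each sw_cookie, fills in the completion status
 * and chains the descriptors into a list that is handed to
 * dp_tx_comp_process_desc_list().
 *
 * Return: none
 */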
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg)
{
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t sw_cookie;
	uint32_t num_msdus;
	uint32_t *msg_word;
	uint8_t ring_id;
	uint8_t tx_status;
	int i;

	DP_HIST_INIT();

	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
	num_msdus = HTT_SOFT_UMAC_TX_COMP_IND_MSDU_COUNT_GET(*msg_word);
	msg_word += HTT_SOFT_UMAC_TX_COMPL_IND_SIZE >> 2;

	for (i = 0; i < num_msdus; i++) {
		sw_cookie = HTT_TX_BUFFER_ADDR_INFO_SW_BUFFER_COOKIE_GET(*(msg_word + 1));

		tx_desc = dp_tx_comp_find_tx_desc_rh(soc, sw_cookie);
		if (!tx_desc) {
			dp_err("failed to find tx desc");
			qdf_assert_always(0);
		}

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (qdf_unlikely((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
			dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
					   tx_desc->id);
			DP_STATS_INC(soc, tx.tx_comp_exception, 1);
			dp_tx_desc_check_corruption(tx_desc);
			goto next_msdu;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			dp_tx_comp_info_rl("pdev in down state %d",
					   tx_desc->id);
			tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
			dp_tx_comp_free_buf(soc, tx_desc, false);
			dp_tx_desc_release(tx_desc, tx_desc->pool_id);
			goto next_msdu;
		}

		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
					 tx_desc->flags, tx_desc->id);
			qdf_assert_always(0);
		}

		if (HTT_TX_BUFFER_ADDR_INFO_RELEASE_SOURCE_GET(*(msg_word + 1)) ==
		    HTT_TX_MSDU_RELEASE_SOURCE_FW)
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		else
			tx_desc->buffer_src = HAL_TX_COMP_RELEASE_SOURCE_TQM;

		tx_desc->peer_id = HTT_TX_MSDU_INFO_SW_PEER_ID_GET(*(msg_word + 2));
		tx_status = HTT_TX_MSDU_INFO_RELEASE_REASON_GET(*(msg_word + 3));

		tx_desc->tx_status =
			(tx_status == HTT_TX_MSDU_RELEASE_REASON_FRAME_ACKED ?
			 HAL_TX_TQM_RR_FRAME_ACKED : HAL_TX_TQM_RR_REM_CMD_REM);

		qdf_mem_copy(&tx_desc->comp, msg_word, HTT_TX_MSDU_INFO_SIZE);

		DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

		/* First ring descriptor on the cycle */
		if (!head_desc) {
			head_desc = tx_desc;
			tail_desc = tx_desc;
		}

		tail_desc->next = tx_desc;
		tx_desc->next = NULL;
		tail_desc = tx_desc;

next_msdu:
		msg_word += HTT_TX_MSDU_INFO_SIZE >> 2;
	}

	/* For now, pass ring_id as 0 (zero) as WCN6450 only
	 * supports one TX ring.
	 */
	ring_id = 0;

	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	DP_STATS_INC(soc, tx.tx_comp[ring_id], num_msdus);
	DP_TX_HIST_STATS_PER_PDEV();
}