dp_be_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_be_tx.h"
#include "dp_tx_desc.h"
#include "hal_tx.h"
#include <hal_be_api.h>
#include <hal_be_tx.h>
#include <dp_htt.h>
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
#define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
#define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
#define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
#else
#define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
#define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
#define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
#endif
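
/*
 * Note: single-pdev (WLAN_MAX_PDEVS == 1) builds serialize bank-profile
 * access with a sleeping mutex, while multi-pdev builds take a BH-disabling
 * spinlock instead; the assumption here is that single-pdev targets only
 * touch the bank profiles from process context.
 */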

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
/* MLO peer id for reinject */
#define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
#define MAX_GSN_NUM 0x0FFF
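/*
 * The MLO mcast global sequence number (be_vdev->seq_num) is 12 bits wide;
 * the senders below wrap it back to 0 once it exceeds MAX_GSN_NUM.
 */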
#ifdef QCA_MULTIPASS_SUPPORT
#define INVALID_VLAN_ID 0xFFFF
#define MULTIPASS_WITH_VLAN_ID 0xFFFE
/**
 * struct dp_mlo_mpass_buf - Multipass buffer
 * @vlan_id: vlan_id of frame
 * @nbuf: pointer to skb buf
 */
struct dp_mlo_mpass_buf {
        uint16_t vlan_id;
        qdf_nbuf_t nbuf;
};
#endif
#endif
#endif

#define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
        HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
        HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
        HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
        HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
        HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
        HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];

#ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
/*
 * Value used to mark a ring desc as invalidated, written into the
 * buffer_virt_addr_63_32 field of the WBM2SW ring desc.
 */
#define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212

/**
 * dp_tx_comp_desc_check_and_invalidate() - sanity check for ring desc and
 *                                          invalidate it after each reaping
 * @tx_comp_hal_desc: ring desc virtual address
 * @r_tx_desc: pointer to current dp TX Desc pointer
 * @tx_desc_va: the original 64-bit Desc VA read from the ring Desc
 * @hw_cc_done: HW cookie conversion done or not
 *
 * If HW CC is done, check the buffer_virt_addr_63_32 value to know if the
 * ring Desc is stale or not. If HW CC is not done, compare the PA between
 * the ring Desc and the current TX desc.
 *
 * Return: None.
 */
static inline
void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
                                          struct dp_tx_desc_s **r_tx_desc,
                                          uint64_t tx_desc_va,
                                          bool hw_cc_done)
{
        qdf_dma_addr_t desc_dma_addr;

        if (qdf_likely(hw_cc_done)) {
                /* Check upper 32 bits */
                if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
                    (tx_desc_va >> 32))
                        *r_tx_desc = NULL;

                /* Invalidate the ring desc for bits 32 ~ 63 of VA */
                hal_tx_comp_set_desc_va_63_32(
                                tx_comp_hal_desc,
                                DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
        } else {
                /* Compare PA between ring desc and current TX desc stored */
                desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);

                if (desc_dma_addr != (*r_tx_desc)->dma_addr)
                        *r_tx_desc = NULL;
        }
}
#else
static inline
void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
                                          struct dp_tx_desc_s **r_tx_desc,
                                          uint64_t tx_desc_va,
                                          bool hw_cc_done)
{
}
#endif

#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
                                            void *tx_comp_hal_desc,
                                            struct dp_tx_desc_s **r_tx_desc)
{
        uint32_t tx_desc_id;
        uint64_t tx_desc_va = 0;
        bool hw_cc_done =
                hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);

        if (qdf_likely(hw_cc_done)) {
                /* HW cookie conversion done */
                tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
                *r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
        } else {
                /* SW does cookie conversion to VA */
                tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
                *r_tx_desc =
                        (struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
        }

        dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
                                             r_tx_desc, tx_desc_va,
                                             hw_cc_done);

        if (*r_tx_desc)
                (*r_tx_desc)->peer_id =
                        dp_tx_comp_get_peer_id_be(soc, tx_comp_hal_desc);
}
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
                                            void *tx_comp_hal_desc,
                                            struct dp_tx_desc_s **r_tx_desc)
{
        uint64_t tx_desc_va;

        tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
        *r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;

        dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
                                             r_tx_desc,
                                             tx_desc_va,
                                             true);

        if (*r_tx_desc)
                (*r_tx_desc)->peer_id =
                        dp_tx_comp_get_peer_id_be(soc, tx_comp_hal_desc);
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
                                            void *tx_comp_hal_desc,
                                            struct dp_tx_desc_s **r_tx_desc)
{
        uint32_t tx_desc_id;

        /* SW does cookie conversion to VA */
        tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
        *r_tx_desc =
                (struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);

        dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
                                             r_tx_desc, 0,
                                             false);

        if (*r_tx_desc)
                (*r_tx_desc)->peer_id =
                        dp_tx_comp_get_peer_id_be(soc, tx_comp_hal_desc);
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
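
/**
 * dp_tx_process_mec_notify_be() - process MEC notify from FW2WBM completion
 * @soc: Handle to DP Soc structure
 * @status: HTT status words carried in the WBM completion descriptor
 *
 * Extracts the vdev id from the HTT status words and hands the notification
 * to dp_tx_mec_handler() under a vdev reference.
 *
 * Return: None
 */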
static inline
void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
{
        struct dp_vdev *vdev;
        uint8_t vdev_id;
        uint32_t *htt_desc = (uint32_t *)status;

        qdf_assert_always(!soc->mec_fw_offload);

        /*
         * Get vdev id from HTT status word in case of MEC
         * notification
         */
        vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
        if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
                return;

        vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                     DP_MOD_ID_HTT_COMP);
        if (!vdev)
                return;

        dp_tx_mec_handler(vdev, status);
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

void dp_tx_process_htt_completion_be(struct dp_soc *soc,
                                     struct dp_tx_desc_s *tx_desc,
                                     uint8_t *status,
                                     uint8_t ring_id)
{
        uint8_t tx_status;
        struct dp_pdev *pdev;
        struct dp_vdev *vdev = NULL;
        struct hal_tx_completion_status ts = {0};
        uint32_t *htt_desc = (uint32_t *)status;
        struct dp_txrx_peer *txrx_peer;
        dp_txrx_ref_handle txrx_ref_handle = NULL;
        struct cdp_tid_tx_stats *tid_stats = NULL;
        struct htt_soc *htt_handle;
        uint8_t vdev_id;
        uint16_t peer_id;

        tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
        htt_handle = (struct htt_soc *)soc->htt_handle;
        htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

        /*
         * There can be a scenario where the WBM-consumed descriptor enqueued
         * from TQM2WBM is reaped first, i.e. the TQM completion can happen
         * before the MEC notification arrives from FW2WBM. Avoid accessing
         * any field of the tx descriptor in case of a MEC notify.
         */
        if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
                return dp_tx_process_mec_notify_be(soc, status);

        /*
         * If the descriptor is already freed in vdev_detach,
         * continue to next descriptor
         */
        if (qdf_unlikely(!tx_desc->flags)) {
                dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
                                   tx_desc->id);
                return;
        }

        if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
                dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
                tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
                goto release_tx_desc;
        }

        pdev = tx_desc->pdev;
        if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
                dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
                tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
                goto release_tx_desc;
        }

        qdf_assert(tx_desc->pdev);

        vdev_id = tx_desc->vdev_id;
        vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                     DP_MOD_ID_HTT_COMP);
        if (qdf_unlikely(!vdev)) {
                dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
                tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
                goto release_tx_desc;
        }

        switch (tx_status) {
        case HTT_TX_FW2WBM_TX_STATUS_OK:
        case HTT_TX_FW2WBM_TX_STATUS_DROP:
        case HTT_TX_FW2WBM_TX_STATUS_TTL:
        {
                uint8_t tid;

                if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
                        ts.peer_id =
                                DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
                                                htt_desc[3]);
                        ts.tid =
                                DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
                                                htt_desc[3]);
                } else {
                        ts.peer_id = HTT_INVALID_PEER;
                        ts.tid = HTT_INVALID_TID;
                }
                ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
                ts.ppdu_id =
                        DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
                                        htt_desc[2]);
                ts.ack_frame_rssi =
                        DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
                                        htt_desc[2]);

                ts.tsf = htt_desc[4];
                ts.first_msdu = 1;
                ts.last_msdu = 1;

                switch (tx_status) {
                case HTT_TX_FW2WBM_TX_STATUS_OK:
                        ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
                        break;
                case HTT_TX_FW2WBM_TX_STATUS_DROP:
                        ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
                        break;
                case HTT_TX_FW2WBM_TX_STATUS_TTL:
                        ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
                        break;
                }

                tid = ts.tid;
                if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
                        tid = CDP_MAX_DATA_TIDS - 1;

                tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

                if (qdf_unlikely(pdev->delay_stats_flag) ||
                    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
                        dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);

                if (tx_status < CDP_MAX_TX_HTT_STATUS)
                        tid_stats->htt_status_cnt[tx_status]++;

                peer_id = dp_tx_comp_adjust_peer_id_be(soc, ts.peer_id);
                txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
                                                       &txrx_ref_handle,
                                                       DP_MOD_ID_HTT_COMP);
                if (qdf_likely(txrx_peer))
                        dp_tx_update_peer_basic_stats(
                                        txrx_peer,
                                        qdf_nbuf_len(tx_desc->nbuf),
                                        tx_status,
                                        pdev->enhanced_stats_en);

                dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
                                             ring_id);
                dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
                dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

                if (qdf_likely(txrx_peer))
                        dp_txrx_peer_unref_delete(txrx_ref_handle,
                                                  DP_MOD_ID_HTT_COMP);
                break;
        }
        case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
        {
                uint8_t reinject_reason;

                reinject_reason =
                        HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
                                        htt_desc[1]);
                dp_tx_reinject_handler(soc, vdev, tx_desc,
                                       status, reinject_reason);
                break;
        }
        case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
        {
                dp_tx_inspect_handler(soc, vdev, tx_desc, status);
                break;
        }
        case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
        {
                DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
                goto release_tx_desc;
        }
        default:
                dp_tx_comp_err("Invalid HTT tx_status %d\n",
                               tx_status);
                goto release_tx_desc;
        }

        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
        return;

release_tx_desc:
        dp_tx_comp_free_buf(soc, tx_desc, false);
        dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
        if (vdev)
                dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
#ifdef DP_TX_IMPLICIT_RBM_MAPPING
/**
 * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to TCL ring_id
 */
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
                                          uint8_t ring_id)
{
        return 0;
}
#else
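/*
 * Without implicit RBM mapping, TCL ring 0 releases to the fixed SW2 return
 * buffer manager, while ring N (N > 0) releases to wbm_sw0_bm_id + N - 1.
 */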
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
                                          uint8_t ring_id)
{
        return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
                          HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
}
#endif /* DP_TX_IMPLICIT_RBM_MAPPING */
#else
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
                                          uint8_t tcl_index)
{
        uint8_t rbm;

        rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
        dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
        return rbm;
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_tx_set_min_rates_for_critical_frames() - sets min-rates for critical pkts
 * @soc: DP soc structure pointer
 * @hal_tx_desc: HAL descriptor where fields are set
 * @nbuf: skb to be considered for min rates
 *
 * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
 * and uses it to determine if the frame is critical. For a critical frame,
 * flow override bits are set to classify the frame into HW's high priority
 * queue. The HW will pick pre-configured min rates for such packets.
 *
 * Return: None
 */
static void
dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
                                        uint32_t *hal_tx_desc,
                                        qdf_nbuf_t nbuf)
{
        /*
         * Critical frames should be queued to the high priority queue for
         * the TID on which they are sent out (for the concerned peer).
         * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
         * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
         * Hence, using who_classify_info_sel = 1, flow_override = 0 to select
         * the HOL queue.
         */
        if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
                hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
                hal_tx_desc_set_flow_override(hal_tx_desc, 0);
                hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
                hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
                                                TX_SEMI_HARD_NOTIFY_E);
        }
}
#else
static inline void
dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
                                        uint32_t *hal_tx_desc_cached,
                                        qdf_nbuf_t nbuf)
{
}
#endif

#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
/**
 * dp_tx_set_particular_tx_queue() - set particular TX TQM flow queue 3 for
 *                                   TX packets, currently TCP ACK only
 * @soc: DP soc structure pointer
 * @hal_tx_desc: HAL descriptor where fields are set
 * @nbuf: skb to be considered for particular TX queue
 *
 * Return: None
 */
static inline
void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
                                   uint32_t *hal_tx_desc,
                                   qdf_nbuf_t nbuf)
{
        if (!soc->tx_ilp_enable)
                return;

        if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
                         QDF_NBUF_CB_PACKET_TYPE_TCP_ACK)) {
                hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
                hal_tx_desc_set_flow_override(hal_tx_desc, 1);
                hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
        }
}
#else
static inline
void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
                                   uint32_t *hal_tx_desc,
                                   qdf_nbuf_t nbuf)
{
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
        defined(WLAN_MCAST_MLO)
#ifdef QCA_MULTIPASS_SUPPORT
/**
 * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
 * @be_vdev: Handle to DP be_vdev structure
 * @ptnr_vdev: DP ptnr_vdev handle
 * @arg: pointer to dp_mlo_mpass_buf
 *
 * Return: None
 */
static void
dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
                                 struct dp_vdev *ptnr_vdev,
                                 void *arg)
{
        struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
        struct dp_txrx_peer *txrx_peer = NULL;
        struct vlan_ethhdr *veh = NULL;
        qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
        uint16_t vlan_id = 0;
        bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
                         (htons(eh->ether_type) != ETH_P_8021Q));

        if (qdf_unlikely(not_vlan))
                return;

        veh = (struct vlan_ethhdr *)eh;
        vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);

        qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
        TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
                      mpass_peer_list_elem) {
                if (vlan_id == txrx_peer->vlan_id) {
                        qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
                        ptr->vlan_id = vlan_id;
                        return;
                }
        }
        qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
}

/**
 * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
 * @be_vdev: Handle to DP be_vdev structure
 * @ptnr_vdev: DP ptnr_vdev handle
 * @arg: pointer to dp_mlo_mpass_buf
 *
 * Return: None
 */
static void
dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
                               struct dp_vdev *ptnr_vdev,
                               void *arg)
{
        struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
        struct dp_tx_msdu_info_s msdu_info;
        struct dp_vdev_be *be_ptnr_vdev = NULL;
        qdf_nbuf_t nbuf_clone;
        uint16_t group_key = 0;

        be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
        if (be_vdev != be_ptnr_vdev) {
                nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
                if (qdf_unlikely(!nbuf_clone)) {
                        dp_tx_debug("nbuf clone failed");
                        return;
                }
        } else {
                nbuf_clone = ptr->nbuf;
        }

        qdf_mem_zero(&msdu_info, sizeof(msdu_info));
        dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
        msdu_info.gsn = be_vdev->seq_num;
        be_ptnr_vdev->seq_num = be_vdev->seq_num;

        if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
                msdu_info.tid = HTT_TX_EXT_TID_INVALID;
                HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
                                msdu_info.meta_data[0], 1);
        } else {
                /* return when vlan map is not initialized */
                if (!ptnr_vdev->iv_vlan_map)
                        return;
                group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];

                /*
                 * If group key is not installed, drop the frame.
                 */
                if (!group_key)
                        return;

                dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
                dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
                msdu_info.exception_fw = 1;
        }

        nbuf_clone = dp_tx_send_msdu_single(
                        ptnr_vdev,
                        nbuf_clone,
                        &msdu_info,
                        DP_MLO_MCAST_REINJECT_PEER_ID,
                        NULL);
        if (qdf_unlikely(nbuf_clone)) {
                dp_info("pkt send failed");
                qdf_nbuf_free(nbuf_clone);
                return;
        }
}

/**
 * dp_tx_mlo_mcast_multipass_handler() - check if an MLO mcast frame needs
 *                                       multipass processing and send it
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true if handling is done else false
 */
static bool
dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
                                  struct dp_vdev *vdev,
                                  qdf_nbuf_t nbuf)
{
        struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
        qdf_nbuf_t nbuf_copy = NULL;
        struct dp_mlo_mpass_buf mpass_buf;

        memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
        mpass_buf.vlan_id = INVALID_VLAN_ID;
        mpass_buf.nbuf = nbuf;

        dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
        if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
                dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
                                      dp_tx_mlo_mcast_multipass_lookup,
                                      &mpass_buf, DP_MOD_ID_TX,
                                      DP_ALL_VDEV_ITER);
                /*
                 * Do not drop the frame when vlan_id doesn't match.
                 * Send the frame as it is.
                 */
                if (mpass_buf.vlan_id == INVALID_VLAN_ID)
                        return false;
        }

        /* AP can have classic clients, special clients &
         * classic repeaters.
         * 1. Classic clients & special client:
         *    Remove vlan header, find corresponding group key
         *    index, fill in metaheader and enqueue multicast
         *    frame to TCL.
         * 2. Classic repeater:
         *    Pass through to classic repeater with vlan tag
         *    intact without any group key index. Hardware
         *    will know which key to use to send frame to
         *    repeater.
         */
        nbuf_copy = qdf_nbuf_copy(nbuf);

        /*
         * Send multicast frame to special peers even
         * if pass through to classic repeater fails.
         */
        if (nbuf_copy) {
                struct dp_mlo_mpass_buf mpass_buf_copy = {0};

                mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
                mpass_buf_copy.nbuf = nbuf_copy;
                /* send frame on partner vdevs */
                dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
                                      dp_tx_mlo_mcast_multipass_send,
                                      &mpass_buf_copy, DP_MOD_ID_TX,
                                      DP_LINK_VDEV_ITER);

                /* send frame on mcast primary vdev */
                dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);

                if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
                        be_vdev->seq_num = 0;
                else
                        be_vdev->seq_num++;
        }

        dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
                              dp_tx_mlo_mcast_multipass_send,
                              &mpass_buf, DP_MOD_ID_TX, DP_LINK_VDEV_ITER);
        dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);

        if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
                be_vdev->seq_num = 0;
        else
                be_vdev->seq_num++;

        return true;
}
#else
static bool
dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
                                  qdf_nbuf_t nbuf)
{
        return false;
}
#endif
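
/**
 * dp_tx_mlo_mcast_pkt_send() - send an MLO mcast frame on one partner vdev
 * @be_vdev: Handle to the originating DP be_vdev structure
 * @ptnr_vdev: DP partner vdev handle to transmit on
 * @arg: nbuf to be sent (cloned when ptnr_vdev differs from be_vdev)
 *
 * Return: None
 */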
void
dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
                         struct dp_vdev *ptnr_vdev,
                         void *arg)
{
        qdf_nbuf_t nbuf = (qdf_nbuf_t)arg;
        qdf_nbuf_t nbuf_clone;
        struct dp_vdev_be *be_ptnr_vdev = NULL;
        struct dp_tx_msdu_info_s msdu_info;

        be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
        if (be_vdev != be_ptnr_vdev) {
                nbuf_clone = qdf_nbuf_clone(nbuf);
                if (qdf_unlikely(!nbuf_clone)) {
                        dp_tx_debug("nbuf clone failed");
                        return;
                }
        } else {
                nbuf_clone = nbuf;
        }

        /* NAWDS clients accept MCAST packets only in 4-address format.
         * This ensures packets are sent in 4-address format to NAWDS clients.
         */
        if (qdf_unlikely(ptnr_vdev->nawds_enabled)) {
                qdf_mem_zero(&msdu_info, sizeof(msdu_info));
                dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
                dp_tx_nawds_handler(ptnr_vdev->pdev->soc, ptnr_vdev,
                                    &msdu_info, nbuf_clone, DP_INVALID_PEER);
        }

        if (qdf_unlikely(dp_tx_proxy_arp(ptnr_vdev, nbuf_clone) !=
                         QDF_STATUS_SUCCESS)) {
                qdf_nbuf_free(nbuf_clone);
                return;
        }

        qdf_mem_zero(&msdu_info, sizeof(msdu_info));
        dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
        msdu_info.gsn = be_vdev->seq_num;
        be_ptnr_vdev->seq_num = be_vdev->seq_num;

        nbuf_clone = dp_tx_send_msdu_single(
                        ptnr_vdev,
                        nbuf_clone,
                        &msdu_info,
                        DP_MLO_MCAST_REINJECT_PEER_ID,
                        NULL);
        if (qdf_unlikely(nbuf_clone)) {
                dp_info("pkt send failed");
                qdf_nbuf_free(nbuf_clone);
                return;
        }
}
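
/**
 * dp_tx_vdev_id_set_hal_tx_desc() - set vdev id in the HAL Tx descriptor
 * @hal_tx_desc_cached: cached HAL Tx descriptor
 * @vdev: DP vdev handle
 * @msdu_info: msdu info carrying the (possibly partner-link) vdev id
 *
 * For MLO mcast, the vdev id comes from msdu_info rather than the vdev
 * itself; the non-MLO variant further below uses vdev->vdev_id directly.
 *
 * Return: None
 */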
static inline void
dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
                              struct dp_vdev *vdev,
                              struct dp_tx_msdu_info_s *msdu_info)
{
        hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
}

void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
                                struct dp_vdev *vdev,
                                qdf_nbuf_t nbuf)
{
        struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        if (qdf_unlikely(vdev->multipass_en) &&
            dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
                return;

        /* send frame on partner vdevs */
        dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
                              dp_tx_mlo_mcast_pkt_send,
                              nbuf, DP_MOD_ID_REINJECT, DP_LINK_VDEV_ITER);

        /* send frame on mcast primary vdev */
        dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);

        if (qdf_unlikely(be_vdev->seq_num > MAX_GSN_NUM))
                be_vdev->seq_num = 0;
        else
                be_vdev->seq_num++;
}

bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
                                   struct dp_vdev *vdev)
{
        struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

        if (be_vdev->mcast_primary)
                return true;

        return false;
}

#if defined(CONFIG_MLO_SINGLE_DEV)
static void
dp_tx_mlo_mcast_enhance_be(struct dp_vdev_be *be_vdev,
                           struct dp_vdev *ptnr_vdev,
                           void *arg)
{
        struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
        qdf_nbuf_t nbuf = (qdf_nbuf_t)arg;

        if (vdev == ptnr_vdev)
                return;

        /*
         * Hold a reference to avoid free of the nbuf in
         * dp_tx_mcast_enhance() in case of successful
         * conversion.
         */
        qdf_nbuf_ref(nbuf);

        if (qdf_unlikely(!dp_tx_mcast_enhance(ptnr_vdev, nbuf)))
                return;

        qdf_nbuf_free(nbuf);
}

qdf_nbuf_t
dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
                        qdf_nbuf_t nbuf,
                        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
        struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
        struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

        if (!tx_exc_metadata->is_mlo_mcast)
                return nbuf;

        if (!be_vdev->mcast_primary) {
                qdf_nbuf_free(nbuf);
                return NULL;
        }

        /*
         * In the single netdev model, avoid the reinjection path, as the
         * mcast packet is identified in the upper layers during the peer
         * search to find the primary TQM based on the dest mac addr.
         *
         * The new bonding interface is added into the bridge, so MCSD
         * updates the snooping table and the wifi driver populates the
         * entries in the appropriate child net devices.
         */
        if (vdev->mcast_enhancement_en) {
                /*
                 * As dp_tx_mcast_enhance() can consume the nbuf in case of
                 * successful conversion, hold a reference on the nbuf.
                 *
                 * Hold the reference to tx on partner links.
                 */
                qdf_nbuf_ref(nbuf);

                if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
                        dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
                                              dp_tx_mlo_mcast_enhance_be,
                                              nbuf, DP_MOD_ID_TX,
                                              DP_ALL_VDEV_ITER);
                        qdf_nbuf_free(nbuf);
                        return NULL;
                }

                /* release reference taken above */
                qdf_nbuf_free(nbuf);
        }

        dp_tx_mlo_mcast_handler_be(soc, vdev, nbuf);

        return NULL;
}
#endif
#else
static inline void
dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
                              struct dp_vdev *vdev,
                              struct dp_tx_msdu_info_s *msdu_info)
{
        hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
        !defined(WLAN_MCAST_MLO)
void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
                                struct dp_vdev *vdev,
                                qdf_nbuf_t nbuf)
{
}

bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
                                   struct dp_vdev *vdev)
{
        return false;
}
#endif

#ifdef CONFIG_SAWF
/**
 * dp_sawf_config_be() - Configure SAWF-specific fields in TCL
 * @soc: DP soc handle
 * @hal_tx_desc_cached: tx descriptor
 * @fw_metadata: firmware metadata
 * @nbuf: skb buffer
 * @msdu_info: msdu info
 *
 * Return: void
 */
void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
                       uint16_t *fw_metadata, qdf_nbuf_t nbuf,
                       struct dp_tx_msdu_info_s *msdu_info)
{
        uint8_t q_id = 0;

        if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
                return;

        q_id = dp_sawf_queue_id_get(nbuf);

        if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
                return;

        msdu_info->tid = (q_id & (CDP_DATA_TID_MAX - 1));
        hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
                                 (q_id & (CDP_DATA_TID_MAX - 1)));

        if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
            (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
                return;

        dp_sawf_tcl_cmd(fw_metadata, nbuf);
        hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
                                             DP_TX_FLOW_OVERRIDE_ENABLE);
        hal_tx_desc_set_flow_override(hal_tx_desc_cached,
                                      DP_TX_FLOW_OVERRIDE_GET(q_id));
        hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
                                              DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
}
#else
static inline
void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
                       uint16_t *fw_metadata, qdf_nbuf_t nbuf,
                       struct dp_tx_msdu_info_s *msdu_info)
{
}

static inline
QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
                                         struct dp_tx_desc_s *tx_desc)
{
        return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
                                              struct dp_tx_desc_s *tx_desc)
{
        return QDF_STATUS_SUCCESS;
}
#endif

#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_ppeds_stats() - Account for fw2wbm_tx_drop drops in the Tx path
 * @soc: Handle to DP Soc structure
 * @peer_id: Peer ID in the descriptor
 *
 * Return: NONE
 */
static inline
void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
{
        struct dp_vdev *vdev = NULL;
        struct dp_txrx_peer *txrx_peer = NULL;
        dp_txrx_ref_handle txrx_ref_handle = NULL;

        DP_STATS_INC(soc, tx.fw2wbm_tx_drop, 1);
        txrx_peer = dp_txrx_peer_get_ref_by_id(soc,
                                               peer_id,
                                               &txrx_ref_handle,
                                               DP_MOD_ID_TX_COMP);
        if (txrx_peer) {
                vdev = txrx_peer->vdev;
                DP_STATS_INC(vdev, tx_i.dropped.fw2wbm_tx_drop, 1);
                dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
        }
}
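
/**
 * dp_ppeds_tx_comp_handler() - reap and process PPE-DS Tx completions
 * @be_soc: Handle to DP BE Soc structure
 * @quota: maximum number of completions to reap in one call
 *
 * Return: number of descriptors processed
 */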
int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
{
        uint32_t num_avail_for_reap = 0;
        void *tx_comp_hal_desc;
        uint8_t buf_src, status = 0;
        uint32_t count = 0;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_s *head_desc = NULL;
        struct dp_tx_desc_s *tail_desc = NULL;
        struct dp_soc *soc = &be_soc->soc;
        void *last_prefetch_hw_desc = NULL;
        struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
        qdf_nbuf_t nbuf;
        hal_soc_handle_t hal_soc = soc->hal_soc;
        hal_ring_handle_t hal_ring_hdl =
                be_soc->ppeds_wbm_release_ring.hal_srng;
        struct dp_txrx_peer *txrx_peer = NULL;
        uint16_t peer_id = CDP_INVALID_PEER;
        dp_txrx_ref_handle txrx_ref_handle = NULL;
        struct dp_vdev *vdev = NULL;
        struct dp_pdev *pdev = NULL;
        struct dp_srng *srng;

        if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
                dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
                return 0;
        }

        num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

        if (num_avail_for_reap >= quota)
                num_avail_for_reap = quota;

        dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

        last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
                                                     num_avail_for_reap);

        srng = &be_soc->ppeds_wbm_release_ring;

        if (srng) {
                hal_update_ring_util(soc->hal_soc, srng->hal_srng,
                                     WBM2SW_RELEASE,
                                     &be_soc->ppeds_wbm_release_ring.stats);
        }

        while (qdf_likely(num_avail_for_reap--)) {
                tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
                if (qdf_unlikely(!tx_comp_hal_desc))
                        break;

                buf_src = hal_tx_comp_get_buffer_source(hal_soc,
                                                        tx_comp_hal_desc);

                if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
                                 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
                        dp_err("Tx comp release_src != TQM | FW but from %d",
                               buf_src);
                        qdf_assert_always(0);
                }

                dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
                                                       &tx_desc);

                if (!tx_desc) {
                        dp_err("unable to retrieve tx_desc!");
                        qdf_assert_always(0);
                        continue;
                }

                if (qdf_unlikely(!(tx_desc->flags &
                                   DP_TX_DESC_FLAG_ALLOCATED) ||
                                 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
                        qdf_assert_always(0);
                        continue;
                }

                tx_desc->buffer_src = buf_src;

                if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
                        status = hal_tx_comp_get_tx_status(tx_comp_hal_desc);
                        if (status != HTT_TX_FW2WBM_TX_STATUS_OK)
                                dp_ppeds_stats(soc, tx_desc->peer_id);

                        nbuf = dp_ppeds_tx_desc_free(soc, tx_desc);
                        qdf_nbuf_free(nbuf);
                } else {
                        tx_desc->tx_status =
                                hal_tx_comp_get_tx_status(tx_comp_hal_desc);

                        /*
                         * Add desc sync to account for extended statistics
                         * during Tx completion.
                         */
                        if (peer_id != tx_desc->peer_id) {
                                if (txrx_peer) {
                                        dp_txrx_peer_unref_delete(
                                                        txrx_ref_handle,
                                                        DP_MOD_ID_TX_COMP);
                                        txrx_peer = NULL;
                                        vdev = NULL;
                                        pdev = NULL;
                                }
                                peer_id = tx_desc->peer_id;
                                txrx_peer =
                                        dp_txrx_peer_get_ref_by_id(
                                                        soc, peer_id,
                                                        &txrx_ref_handle,
                                                        DP_MOD_ID_TX_COMP);
                                if (txrx_peer) {
                                        vdev = txrx_peer->vdev;
                                        if (!vdev)
                                                goto next_desc;

                                        pdev = vdev->pdev;
                                        if (!pdev)
                                                goto next_desc;

                                        dp_tx_desc_update_fast_comp_flag(soc,
                                                tx_desc,
                                                !pdev->enhanced_stats_en);
                                        if (pdev->enhanced_stats_en) {
                                                hal_tx_comp_desc_sync(
                                                        tx_comp_hal_desc,
                                                        &tx_desc->comp, 1);
                                        }
                                }
                        } else if (txrx_peer && vdev && pdev) {
                                dp_tx_desc_update_fast_comp_flag(soc,
                                        tx_desc,
                                        !pdev->enhanced_stats_en);
                                if (pdev->enhanced_stats_en) {
                                        hal_tx_comp_desc_sync(
                                                tx_comp_hal_desc,
                                                &tx_desc->comp, 1);
                                }
                        }
next_desc:
                        if (!head_desc) {
                                head_desc = tx_desc;
                                tail_desc = tx_desc;
                        }

                        tail_desc->next = tx_desc;
                        tx_desc->next = NULL;
                        tail_desc = tx_desc;

                        count++;

                        dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
                                                       num_avail_for_reap,
                                                       hal_ring_hdl,
                                                       &last_prefetch_hw_desc,
                                                       &last_prefetch_sw_desc);
                }
        }

        dp_srng_access_end(NULL, soc, hal_ring_hdl);

        if (txrx_peer)
                dp_txrx_peer_unref_delete(txrx_ref_handle,
                                          DP_MOD_ID_TX_COMP);
        if (head_desc)
                dp_tx_comp_process_desc_list(soc, head_desc,
                                             CDP_MAX_TX_COMP_PPE_RING);

        return count;
}
#endif

#if defined(QCA_SUPPORT_WDS_EXTENDED)
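/**
 * dp_get_peer_from_tx_exc_meta() - apply WDS-ext peer AST overrides
 * @soc: DP soc handle
 * @hal_tx_desc_cached: cached HAL Tx descriptor
 * @tx_exc_metadata: Handle that holds exception path meta data
 * @ast_idx: pointer to the AST index to be filled from the peer
 * @ast_hash: pointer to the AST hash to be filled from the peer
 *
 * For WDS-extended peers, overrides the default BSS AST index/hash with the
 * peer's own values and enables index lookup override in the descriptor.
 *
 * Return: None
 */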
static inline void
dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
                             struct cdp_tx_exception_metadata *tx_exc_metadata,
                             uint16_t *ast_idx, uint16_t *ast_hash)
{
        struct dp_peer *peer = NULL;

        if (tx_exc_metadata->is_wds_extended) {
                peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
                                             DP_MOD_ID_TX);
                if (peer) {
                        *ast_idx = peer->ast_idx;
                        *ast_hash = peer->ast_hash;
                        hal_tx_desc_set_index_lookup_override
                                (soc->hal_soc,
                                 hal_tx_desc_cached,
                                 0x1);
                        dp_peer_unref_delete(peer, DP_MOD_ID_TX);
                }
        } else {
                return;
        }
}
#else
static inline void
dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
                             struct cdp_tx_exception_metadata *tx_exc_metadata,
                             uint16_t *ast_idx, uint16_t *ast_hash)
{
}
#endif
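
/**
 * dp_tx_hw_enqueue_be() - enqueue a Tx descriptor to the HW TCL ring
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @fw_metadata: metadata to send to the target firmware along with the frame
 * @tx_exc_metadata: handle that holds exception path meta data
 * @msdu_info: msdu info carrying queue and TID selection
 *
 * Fills a cached HAL descriptor from @tx_desc, then copies it into the next
 * free TCL HW descriptor and rings (or coalesces) the doorbell.
 *
 * Return: QDF_STATUS_SUCCESS on enqueue, an error status otherwise
 */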
QDF_STATUS
dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
                    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
                    struct cdp_tx_exception_metadata *tx_exc_metadata,
                    struct dp_tx_msdu_info_s *msdu_info)
{
        void *hal_tx_desc;
        uint32_t *hal_tx_desc_cached;
        int coalesce = 0;
        struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
        uint8_t ring_id = tx_q->ring_id;
        uint8_t tid;
        struct dp_vdev_be *be_vdev;
        uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
        uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
        hal_ring_handle_t hal_ring_hdl = NULL;
        QDF_STATUS status = QDF_STATUS_E_RESOURCES;
        uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
        uint16_t ast_idx = vdev->bss_ast_idx;
        uint16_t ast_hash = vdev->bss_ast_hash;

        be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

        if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
                dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
                return QDF_STATUS_E_RESOURCES;
        }

        if (qdf_unlikely(tx_exc_metadata)) {
                qdf_assert_always((tx_exc_metadata->tx_encap_type ==
                                   CDP_INVALID_TX_ENCAP_TYPE) ||
                                  (tx_exc_metadata->tx_encap_type ==
                                   vdev->tx_encap_type));

                if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
                        qdf_assert_always((tx_exc_metadata->sec_type ==
                                           CDP_INVALID_SEC_TYPE) ||
                                          tx_exc_metadata->sec_type ==
                                          vdev->sec_type);
                dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
                                             tx_exc_metadata,
                                             &ast_idx, &ast_hash);
        }

        hal_tx_desc_cached = (void *)cached_desc;

        if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
                dp_sawf_config_be(soc, hal_tx_desc_cached,
                                  &fw_metadata, tx_desc->nbuf, msdu_info);
                dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
        }

        hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
                                    tx_desc->dma_addr, bm_id, tx_desc->id,
                                    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
        hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
                                   vdev->lmac_id);

        hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
                                        ast_idx);
        /*
         * Bank_ID is used as the DSCP_TABLE number in Beryllium,
         * so there is no explicit field for DSCP_TID_TABLE_NUM.
         */
        hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
                                      (ast_hash & 0xF));

        hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
        hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
        hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);

        if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
                hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

        /* verify checksum offload configuration */
        if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
             QDF_NBUF_TX_CKSUM_TCP_UDP) ||
            qdf_nbuf_is_tso(tx_desc->nbuf)) {
                hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
                hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
        }

        hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);

        dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);

        tid = msdu_info->tid;
        if (tid != HTT_TX_EXT_TID_INVALID)
                hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

        dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
                                                tx_desc->nbuf);
        dp_tx_set_particular_tx_queue(soc, hal_tx_desc_cached,
                                      tx_desc->nbuf);
        dp_tx_desc_set_ktimestamp(vdev, tx_desc);

        hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

        if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
                dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
                DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
                DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
                dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
                return status;
        }

        hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
        if (qdf_unlikely(!hal_tx_desc)) {
                dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
                DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
                DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
                dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
                goto ring_access_fail;
        }

        tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
        dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);

        /* Sync cached descriptor with HW */
        hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);

        coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
                                            msdu_info, ring_id);

        DP_STATS_INC_PKT(vdev, tx_i.processed, 1, dp_tx_get_pkt_len(tx_desc));
        DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
        dp_tx_update_stats(soc, tx_desc, ring_id);
        status = QDF_STATUS_SUCCESS;

        dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
                                 hal_ring_hdl, soc, ring_id);

ring_access_fail:
        dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
        dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
                             qdf_get_log_timestamp(), tx_desc->nbuf);
        return status;
}

#ifdef IPA_OFFLOAD
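/**
 * dp_tx_get_ipa_bank_config() - fill the TCL bank config used for IPA Tx
 * @be_soc: Handle to DP BE Soc structure
 * @bank_config: bank config union to be populated
 *
 * Return: None
 */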
static void
dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
                          union hal_tx_bank_config *bank_config)
{
        bank_config->epd = 0;
        bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
        bank_config->encrypt_type = 0;

        bank_config->src_buffer_swap = 0;
        bank_config->link_meta_swap = 0;
        bank_config->index_lookup_enable = 0;
        bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
        bank_config->addrx_en = 1;
        bank_config->addry_en = 1;
        bank_config->mesh_enable = 0;
        bank_config->dscp_tid_map_id = 0;
        bank_config->vdev_id_check_en = 0;
        bank_config->pmac_id = 0;
}

static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
{
        union hal_tx_bank_config ipa_config = {0};
        int bid;

        if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
                be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
                return;
        }

        dp_tx_get_ipa_bank_config(be_soc, &ipa_config);

        /* Let IPA use the last HOST-owned bank */
        bid = be_soc->num_bank_profiles - 1;

        be_soc->bank_profiles[bid].is_configured = true;
        be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
        hal_tx_populate_bank_register(be_soc->soc.hal_soc,
                                      &be_soc->bank_profiles[bid].bank_config,
                                      bid);
        qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);

        dp_info("IPA bank at slot %d config:0x%x", bid,
                be_soc->bank_profiles[bid].bank_config.val);

        be_soc->ipa_bank_id = bid;
}
#else /* !IPA_OFFLOAD */
static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
{
}
#endif /* IPA_OFFLOAD */

QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
{
        int i, num_tcl_banks;

        num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);

        qdf_assert_always(num_tcl_banks);
        be_soc->num_bank_profiles = num_tcl_banks;

        be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
                                               sizeof(*be_soc->bank_profiles));
        if (!be_soc->bank_profiles) {
                dp_err("unable to allocate memory for DP TX Profiles!");
                return QDF_STATUS_E_NOMEM;
        }

        DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);

        for (i = 0; i < num_tcl_banks; i++) {
                be_soc->bank_profiles[i].is_configured = false;
                qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
        }
        dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);

        dp_tx_init_ipa_bank_profile(be_soc);

        return QDF_STATUS_SUCCESS;
}

void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
{
        qdf_mem_free(be_soc->bank_profiles);
        DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
}
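
/**
 * dp_tx_get_vdev_bank_config() - derive a TCL bank config from vdev params
 * @be_vdev: Handle to DP be_vdev structure
 * @bank_config: bank config union to be populated
 *
 * Return: None
 */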
static
void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
                                union hal_tx_bank_config *bank_config)
{
        struct dp_vdev *vdev = &be_vdev->vdev;

        bank_config->epd = 0;

        bank_config->encap_type = vdev->tx_encap_type;

        /* Only valid for raw frames. Needs work for RAW mode */
        if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
                bank_config->encrypt_type = sec_type_map[vdev->sec_type];
        } else {
                bank_config->encrypt_type = 0;
        }

        bank_config->src_buffer_swap = 0;
        bank_config->link_meta_swap = 0;

        if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
            vdev->opmode == wlan_op_mode_sta) {
                bank_config->index_lookup_enable = 1;
                bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
                bank_config->addrx_en = 0;
                bank_config->addry_en = 0;
        } else {
                bank_config->index_lookup_enable = 0;
                bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
                bank_config->addrx_en =
                        (vdev->hal_desc_addr_search_flags &
                         HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
                bank_config->addry_en =
                        (vdev->hal_desc_addr_search_flags &
                         HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
        }

        bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;

        bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;

        /* Disabling vdev id check for now. Needs revisit. */
        bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;

        bank_config->pmac_id = vdev->lmac_id;
}
int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev)
{
	char *temp_str = "";
	bool found_match = false;
	int bank_id = DP_BE_INVALID_BANK_ID;
	int i;
	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
	union hal_tx_bank_config vdev_config = {0};

	/* convert vdev params into hal_tx_bank_config */
	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);

	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);

	/* go over all banks and find a matching/unconfigured/unused bank */
	for (i = 0; i < be_soc->num_bank_profiles; i++) {
		if (be_soc->bank_profiles[i].is_configured &&
		    (be_soc->bank_profiles[i].bank_config.val ^
		     vdev_config.val) == 0) {
			found_match = true;
			break;
		}

		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
		    !be_soc->bank_profiles[i].is_configured)
			unconfigured_slot = i;
		else if (zero_ref_count_slot == DP_BE_INVALID_BANK_ID &&
			 !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
			zero_ref_count_slot = i;
	}

	if (found_match) {
		temp_str = "matching";
		bank_id = i;
		goto inc_ref_and_return;
	}

	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "unconfigured";
		bank_id = unconfigured_slot;
		goto configure_and_return;
	}

	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "zero_ref_count";
		bank_id = zero_ref_count_slot;
	}

	if (bank_id == DP_BE_INVALID_BANK_ID) {
		/* Release the lock before bailing out; returning with
		 * tx_bank_lock held would deadlock the next caller.
		 */
		DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
		dp_alert("unable to find TX bank!");
		QDF_BUG(0);
		return bank_id;
	}

configure_and_return:
	be_soc->bank_profiles[bank_id].is_configured = true;
	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
				      &be_soc->bank_profiles[bank_id].bank_config,
				      bank_id);
inc_ref_and_return:
	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);

	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
		temp_str, bank_id, vdev_config.val,
		be_soc->bank_profiles[bank_id].bank_config.val,
		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));
	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
		be_soc->bank_profiles[bank_id].bank_config.epd,
		be_soc->bank_profiles[bank_id].bank_config.encap_type,
		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
		be_soc->bank_profiles[bank_id].bank_config.addry_en,
		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);

	return bank_id;
}
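
/*
 * The selection policy above is, in order of preference:
 *   1. a configured bank whose bank_config.val matches exactly
 *      (XOR of the two config words is zero),
 *   2. an unconfigured slot,
 *   3. a configured slot whose ref_count has dropped to zero.
 *
 * A minimal standalone sketch of the same policy (illustrative only;
 * "struct slot" and pick_slot() are hypothetical, not driver code):
 *
 *	int pick_slot(struct slot *s, int n, uint32_t want)
 *	{
 *		int free_slot = -1, idle_slot = -1;
 *		int i;
 *
 *		for (i = 0; i < n; i++) {
 *			if (s[i].configured && s[i].val == want)
 *				return i;	// exact match wins
 *			if (free_slot < 0 && !s[i].configured)
 *				free_slot = i;
 *			else if (idle_slot < 0 && s[i].refs == 0)
 *				idle_slot = i;
 *		}
 *		return free_slot >= 0 ? free_slot : idle_slot;	// -1 if none
 *	}
 */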
void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
			    struct dp_vdev_be *be_vdev)
{
	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
}
/* Re-evaluate the bank for a vdev whose config changed. The current bank
 * is released first so that, if its ref_count drops to zero, its slot is
 * available for re-use by the subsequent lookup.
 */
void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
			       struct dp_vdev_be *be_vdev)
{
	dp_tx_put_bank_profile(be_soc, be_vdev);
	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
	be_vdev->vdev.bank_id = be_vdev->bank_id;
}
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_soc_be *be_soc;
	struct dp_spt_page_desc *page_desc;
	struct dp_tx_desc_s *tx_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!num_elem) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	tx_desc_pool = &soc->tx_desc[pool_id];
	cc_ctx = &be_soc->tx_cc_ctx[pool_id];

	tx_desc = tx_desc_pool->freelist;
	page_desc = &cc_ctx->page_desc_base[0];
	while (tx_desc) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* Store each TX descriptor VA in the SPT page and derive
		 * the corresponding cookie ID from its location.
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 tx_desc);
		tx_desc->id =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}
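
/*
 * Illustrative sketch of the cookie layout (not driver code). Assuming
 * each secondary page table holds 512 VA entries (so
 * DP_CC_SPT_PAGE_MAX_ENTRIES_MASK == 0x1FF), the cookie generated above
 * packs the SPT page index and the entry index within that page:
 *
 *	#define ENTRY_BITS	9			// assumption
 *	#define ENTRY_MASK	((1u << ENTRY_BITS) - 1)
 *
 *	static inline uint32_t cc_id_pack(uint32_t ppt, uint32_t entry)
 *	{
 *		return (ppt << ENTRY_BITS) | (entry & ENTRY_MASK);
 *	}
 *
 *	// inverse used on the completion path to locate the VA slot
 *	static inline void cc_id_unpack(uint32_t id,
 *					uint32_t *ppt, uint32_t *entry)
 *	{
 *		*ppt = id >> ENTRY_BITS;
 *		*entry = id & ENTRY_MASK;
 *	}
 *
 * The exact bit widths come from dp_cc_desc_id_generate() and the
 * DP_CC_* macros; the values above are assumptions for illustration.
 */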
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id)
{
	struct dp_spt_page_desc *page_desc;
	struct dp_soc_be *be_soc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	cc_ctx = &be_soc->tx_cc_ctx[pool_id];

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			       uint32_t quota)
{
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&tx_comp_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_CONFIG_TX_DELAY)
#define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))

#define HW_TX_DELAY_MAX                       0x1000000
#define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US    10
#define HW_TX_DELAY_MASK                      0x1FFFFFFF
#define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
	 HW_TX_DELAY_MASK)

static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	uint32_t ppdu_id;
	uint8_t link_id_offset, link_id_bits;
	uint8_t hw_link_id;
	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
	uint32_t delay;
	int32_t delta_tsf2, delta_tqm;

	if (!ts->valid)
		return QDF_STATUS_E_INVAL;

	link_id_offset = soc->link_id_offset;
	link_id_bits = soc->link_id_bits;
	ppdu_id = ts->ppdu_id;
	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
					   link_id_bits);

	msdu_tqm_enqueue_tstamp_us =
		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
	msdu_compl_tsf_tstamp_us = ts->tsf;

	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);

	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
			delta_tqm) & HW_TX_DELAY_MASK;
	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
			delta_tsf2) & HW_TX_DELAY_MASK;

	delay = (final_msdu_compl_tsf_tstamp_us -
		 final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;

	if (delay > HW_TX_DELAY_MAX)
		return QDF_STATUS_E_FAILURE;

	if (delay_us)
		*delay_us = delay;

	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
#endif
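
/*
 * Worked example (illustrative): buffer_timestamp from the completion
 * descriptor is in 1024 us units, so the << 10 in
 * TX_COMPL_BUFFER_TSTAMP_US converts it to microseconds before masking
 * to the 29-bit window. Because both timestamps are masked with
 * HW_TX_DELAY_MASK, the subtraction is modulo 2^29 and stays correct
 * across a single wrap of the counter:
 *
 *	enqueue = 0x1FFFFF00;			 // near the wrap point
 *	done    = 0x00000100;			 // after wrapping
 *	delay   = (done - enqueue) & 0x1FFFFFFF; // = 0x200 us, not negative
 *
 * Delays above HW_TX_DELAY_MAX (0x1000000 us, about 16.7 s) are rejected,
 * since such a value is far more likely to reflect mismatched or wrapped
 * clocks than a real completion time.
 */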
QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
}
static inline
qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
				 struct dp_tx_desc_s *tx_desc,
				 qdf_nbuf_t nbuf)
{
	/* Clean only the first 256 bytes of the payload from the CPU
	 * cache, without a DSB; the barrier is issued once, after the
	 * HW descriptor is written (see qdf_dsb() in the fast-send path).
	 */
	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
					(void *)(nbuf->data + 256));
	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

static inline
void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
			 struct dp_tx_desc_s *desc)
{
	/* Nothing to undo: the map path above uses virt-to-phys
	 * translation and creates no mapping state.
	 */
}
#ifdef QCA_DP_TX_NBUF_LIST_FREE
qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct dp_tx_desc_s *tx_desc;
	uint16_t desc_pool_id;
	uint16_t pkt_len;
	qdf_dma_addr_t paddr;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	hal_ring_handle_t hal_ring_hdl = NULL;
	uint32_t *hal_tx_desc_cached;
	void *hal_tx_desc;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;

	pkt_len = qdf_nbuf_headlen(nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
	DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
	DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);

	pdev = vdev->pdev;
	if (dp_tx_limit_check(vdev, nbuf))
		return nbuf;

	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return nbuf;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->shinfo_addr = skb_end_pointer(nbuf);
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->length = pkt_len;
	tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
	tx_desc->nbuf->fast_recycled = 1;

	if (nbuf->is_from_recycler && nbuf->fast_xmit)
		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;

	paddr = dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;

	/* Build the TCL data command words in the cached descriptor */
	hal_tx_desc_cached = (void *)cached_desc;
	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
	hal_tx_desc_cached[1] = tx_desc->id <<
		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;

	/* bank_id */
	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;

	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;

	hal_tx_desc_cached[4] = tx_desc->length;
	/* l3 and l4 checksum enable */
	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;

	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;

	if (vdev->opmode == wlan_op_mode_sta)
		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
			((vdev->bss_ast_hash & 0xF) <<
			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail2;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	/* Sync cached descriptor with HW */
	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
	qdf_dsb();

	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
	status = QDF_STATUS_SUCCESS;

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);

ring_access_fail2:
	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_nbuf_unmap_be(soc, tx_desc);
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(soc, tx_desc, desc_pool_id);

	return nbuf;
}
#endif
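
/*
 * Usage sketch (illustrative, with a hypothetical caller): the fast-send
 * API returns NULL when the frame was queued to TCL and hands the nbuf
 * back on any failure, so the caller still owns freeing it in that case:
 *
 *	qdf_nbuf_t leftover;
 *
 *	leftover = dp_tx_fast_send_be(soc_hdl, vdev_id, nbuf);
 *	if (leftover)
 *		qdf_nbuf_free(leftover);	// not consumed by DP
 */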
QDF_STATUS dp_tx_desc_pool_alloc_be(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free_be(struct dp_soc *soc, uint8_t pool_id)
{
}