dp_be_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "cdp_txrx_cmn_struct.h"
#include "dp_types.h"
#include "dp_tx.h"
#include "dp_be_tx.h"
#include "dp_tx_desc.h"
#include "hal_tx.h"
#include <hal_be_api.h>
#include <hal_be_tx.h>
#include <dp_htt.h>
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_TX_BANK_LOCK_CREATE(lock) qdf_mutex_create(lock)
#define DP_TX_BANK_LOCK_DESTROY(lock) qdf_mutex_destroy(lock)
#define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_mutex_acquire(lock)
#define DP_TX_BANK_LOCK_RELEASE(lock) qdf_mutex_release(lock)
#else
#define DP_TX_BANK_LOCK_CREATE(lock) qdf_spinlock_create(lock)
#define DP_TX_BANK_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
#define DP_TX_BANK_LOCK_ACQUIRE(lock) qdf_spin_lock_bh(lock)
#define DP_TX_BANK_LOCK_RELEASE(lock) qdf_spin_unlock_bh(lock)
#endif
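/*
 * Note on the lock macros above: single-pdev builds protect the TX bank
 * profiles with a mutex, while multi-pdev builds use a BH-disabling
 * spinlock. Since acquiring a mutex may sleep, the bank acquire/release
 * paths below cannot be called from atomic context on single-pdev builds.
 */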
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
/* MLO peer id for reinject */
#define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
#define MAX_GSN_NUM 0x0FFF

#ifdef QCA_MULTIPASS_SUPPORT
#define INVALID_VLAN_ID 0xFFFF
#define MULTIPASS_WITH_VLAN_ID 0xFFFE
/**
 * struct dp_mlo_mpass_buf - Multipass buffer
 * @vlan_id: vlan_id of frame
 * @nbuf: pointer to skb buf
 */
struct dp_mlo_mpass_buf {
	uint16_t vlan_id;
	qdf_nbuf_t nbuf;
};
#endif
#endif
#endif
#define DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_VALID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_VALID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(_var)
#define DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(_var) \
	HTT_TX_WBM_COMPLETION_V2_TRANSMIT_CNT_VALID_GET(_var)

extern uint8_t sec_type_map[MAX_CDP_SEC_TYPE];
#ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
/*
 * Value written to the buffer_virt_addr_63_32 field of a WBM2SW ring
 * descriptor to mark that descriptor as invalidated.
 */
#define DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE 0x12121212

/**
 * dp_tx_comp_desc_check_and_invalidate() - sanity check a ring desc and
 *					    invalidate it after each reaping
 * @tx_comp_hal_desc: ring desc virtual address
 * @r_tx_desc: pointer to current dp TX Desc pointer
 * @tx_desc_va: the original 64-bit Desc VA read from the ring Desc
 * @hw_cc_done: HW cookie conversion done or not
 *
 * If HW CC is done, check the buffer_virt_addr_63_32 value to know if
 * the ring Desc is stale or not. If HW CC is not done, compare the PA
 * between the ring Desc and the current TX Desc.
 *
 * Return: None.
 */
static inline
void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
					  struct dp_tx_desc_s **r_tx_desc,
					  uint64_t tx_desc_va,
					  bool hw_cc_done)
{
	qdf_dma_addr_t desc_dma_addr;

	if (qdf_likely(hw_cc_done)) {
		/* Check upper 32 bits */
		if (DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE ==
		    (tx_desc_va >> 32))
			*r_tx_desc = NULL;

		/* Invalidate the ring desc for bits 32 ~ 63 of VA */
		hal_tx_comp_set_desc_va_63_32(
				tx_comp_hal_desc,
				DP_TX_COMP_DESC_BUFF_VA_32BITS_HI_INVALIDATE);
	} else {
		/* Compare PA between ring desc and current TX desc stored */
		desc_dma_addr = hal_tx_comp_get_paddr(tx_comp_hal_desc);

		if (desc_dma_addr != (*r_tx_desc)->dma_addr)
			*r_tx_desc = NULL;
	}
}
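/*
 * A minimal note on the check above: when *r_tx_desc is set to NULL, the
 * caller treats the completion as stale and skips it (see the
 * "unable to retrieve tx_desc" handling in dp_ppeds_tx_comp_handler()),
 * which guards against re-reaping a ring entry that HW has not rewritten
 * since the previous pass.
 */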
#else
static inline
void dp_tx_comp_desc_check_and_invalidate(void *tx_comp_hal_desc,
					  struct dp_tx_desc_s **r_tx_desc,
					  uint64_t tx_desc_va,
					  bool hw_cc_done)
{
}
#endif
#ifdef DP_FEATURE_HW_COOKIE_CONVERSION
#ifdef DP_HW_COOKIE_CONVERT_EXCEPTION
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;
	uint64_t tx_desc_va = 0;
	bool hw_cc_done =
		hal_tx_comp_get_cookie_convert_done(tx_comp_hal_desc);

	if (qdf_likely(hw_cc_done)) {
		/* HW cookie conversion done */
		tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
		*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;
	} else {
		/* SW does cookie conversion to VA */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		*r_tx_desc =
		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);
	}

	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
					     r_tx_desc, tx_desc_va,
					     hw_cc_done);

	if (*r_tx_desc)
		(*r_tx_desc)->peer_id =
				dp_tx_comp_get_peer_id_be(soc,
							  tx_comp_hal_desc);
}
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint64_t tx_desc_va;

	tx_desc_va = hal_tx_comp_get_desc_va(tx_comp_hal_desc);
	*r_tx_desc = (struct dp_tx_desc_s *)(uintptr_t)tx_desc_va;

	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
					     r_tx_desc,
					     tx_desc_va,
					     true);

	if (*r_tx_desc)
		(*r_tx_desc)->peer_id =
				dp_tx_comp_get_peer_id_be(soc,
							  tx_comp_hal_desc);
}
#endif /* DP_HW_COOKIE_CONVERT_EXCEPTION */
#else
void dp_tx_comp_get_params_from_hal_desc_be(struct dp_soc *soc,
					    void *tx_comp_hal_desc,
					    struct dp_tx_desc_s **r_tx_desc)
{
	uint32_t tx_desc_id;

	/* SW does cookie conversion to VA */
	tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
	*r_tx_desc =
	(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_desc_id);

	dp_tx_comp_desc_check_and_invalidate(tx_comp_hal_desc,
					     r_tx_desc, 0,
					     false);

	if (*r_tx_desc)
		(*r_tx_desc)->peer_id =
				dp_tx_comp_get_peer_id_be(soc,
							  tx_comp_hal_desc);
}
#endif /* DP_FEATURE_HW_COOKIE_CONVERSION */
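/*
 * Summary of the three variants above: with DP_HW_COOKIE_CONVERT_EXCEPTION
 * the HW may skip cookie conversion for individual descriptors, so SW falls
 * back to dp_cc_desc_find() whenever hal_tx_comp_get_cookie_convert_done()
 * reports an unconverted entry; without the exception the HW-provided VA is
 * always used; and without DP_FEATURE_HW_COOKIE_CONVERSION the SW cookie
 * lookup is the only path.
 */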
static inline
void dp_tx_process_mec_notify_be(struct dp_soc *soc, uint8_t *status)
{
	struct dp_vdev *vdev;
	uint8_t vdev_id;
	uint32_t *htt_desc = (uint32_t *)status;

	dp_assert_always_internal(soc->mec_fw_offload);

	/*
	 * Get vdev id from HTT status word in case of MEC
	 * notification
	 */
	vdev_id = DP_TX_WBM_COMPLETION_V3_VDEV_ID_GET(htt_desc[4]);
	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);
	if (!vdev)
		return;

	dp_tx_mec_handler(vdev, status);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
void dp_tx_process_htt_completion_be(struct dp_soc *soc,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t *status,
				     uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_txrx_peer *txrx_peer;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;
	uint8_t vdev_id;
	uint16_t peer_id;

	tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	/*
	 * There can be a scenario where the WBM-consuming descriptor is
	 * enqueued from TQM2WBM first, so the TQM completion can happen
	 * before the MEC notification comes from FW2WBM. Avoid accessing
	 * any field of the tx descriptor in case of a MEC notify.
	 */
	if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY)
		return dp_tx_process_mec_notify_be(soc, status);

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if (qdf_unlikely(!tx_desc->flags)) {
		dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
				   tx_desc->id);
		return;
	}

	if (qdf_unlikely(tx_desc->vdev_id == DP_INVALID_VDEV_ID)) {
		dp_tx_comp_info_rl("Invalid vdev_id %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	pdev = tx_desc->pdev;
	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	qdf_assert(tx_desc->pdev);

	vdev_id = tx_desc->vdev_id;
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
				     DP_MOD_ID_HTT_COMP);
	if (qdf_unlikely(!vdev)) {
		dp_tx_comp_info_rl("Unable to get vdev ref %d", tx_desc->id);
		tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
		goto release_tx_desc;
	}

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;
		uint8_t transmit_cnt_valid = 0;

		if (DP_TX_WBM_COMPLETION_V3_VALID_GET(htt_desc[3])) {
			ts.peer_id =
				DP_TX_WBM_COMPLETION_V3_SW_PEER_ID_GET(
						htt_desc[3]);
			ts.tid =
				DP_TX_WBM_COMPLETION_V3_TID_NUM_GET(
						htt_desc[3]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.release_src = HAL_TX_COMP_RELEASE_SOURCE_FW;
		ts.ppdu_id =
			DP_TX_WBM_COMPLETION_V3_SCH_CMD_ID_GET(
					htt_desc[2]);
		ts.ack_frame_rssi =
			DP_TX_WBM_COMPLETION_V3_ACK_FRAME_RSSI_GET(
					htt_desc[2]);

		transmit_cnt_valid =
			DP_TX_WBM_COMPLETION_V3_TRANSMIT_CNT_VALID_GET(
					htt_desc[3]);
		if (transmit_cnt_valid)
			ts.transmit_cnt =
				HTT_TX_WBM_COMPLETION_V3_TRANSMIT_COUNT_GET(
						htt_desc[1]);

		ts.tsf = htt_desc[4];
		ts.first_msdu = 1;
		ts.last_msdu = 1;

		switch (tx_status) {
		case HTT_TX_FW2WBM_TX_STATUS_OK:
			ts.status = HAL_TX_TQM_RR_FRAME_ACKED;
			break;
		case HTT_TX_FW2WBM_TX_STATUS_DROP:
			ts.status = HAL_TX_TQM_RR_REM_CMD_REM;
			break;
		case HTT_TX_FW2WBM_TX_STATUS_TTL:
			ts.status = HAL_TX_TQM_RR_REM_CMD_TX;
			break;
		}

		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag) ||
		    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);

		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		peer_id = dp_tx_comp_adjust_peer_id_be(soc, ts.peer_id);
		txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id,
						       &txrx_ref_handle,
						       DP_MOD_ID_HTT_COMP);
		if (qdf_likely(txrx_peer))
			dp_tx_update_peer_basic_stats(
						txrx_peer,
						qdf_nbuf_len(tx_desc->nbuf),
						tx_status,
						pdev->enhanced_stats_en);

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, txrx_peer,
					     ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, txrx_peer);
		dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);

		if (qdf_likely(txrx_peer))
			dp_txrx_peer_unref_delete(txrx_ref_handle,
						  DP_MOD_ID_HTT_COMP);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		uint8_t reinject_reason;

		reinject_reason =
			HTT_TX_WBM_COMPLETION_V3_REINJECT_REASON_GET(
							htt_desc[1]);
		dp_tx_reinject_handler(soc, vdev, tx_desc,
				       status, reinject_reason);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
	{
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		goto release_tx_desc;
	}
	default:
		dp_tx_comp_err("Invalid HTT tx_status %d\n",
			       tx_status);
		goto release_tx_desc;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
	return;

release_tx_desc:
	dp_tx_comp_free_buf(soc, tx_desc, false);
	dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
#ifdef QCA_OL_TX_MULTIQ_SUPPORT
#ifdef DP_TX_IMPLICIT_RBM_MAPPING
/**
 * dp_tx_get_rbm_id_be() - Get the RBM ID for data transmission completion.
 * @soc: DP soc structure pointer
 * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
 *
 * Return: RBM ID corresponding to TCL ring_id
 */
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return 0;
}
#else
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t ring_id)
{
	return (ring_id ? soc->wbm_sw0_bm_id + (ring_id - 1) :
			  HAL_WBM_SW2_BM_ID(soc->wbm_sw0_bm_id));
}
#endif /* DP_TX_IMPLICIT_RBM_MAPPING */
#else
static inline uint8_t dp_tx_get_rbm_id_be(struct dp_soc *soc,
					  uint8_t tcl_index)
{
	uint8_t rbm;

	rbm = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_index);
	dp_verbose_debug("tcl_id %u rbm %u", tcl_index, rbm);
	return rbm;
}
#endif
#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_tx_set_min_rates_for_critical_frames() - sets min-rates for critical pkts
 * @soc: DP soc structure pointer
 * @hal_tx_desc: HAL descriptor where fields are set
 * @nbuf: skb to be considered for min rates
 *
 * The function relies on upper layers to set QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL
 * and uses it to determine if the frame is critical. For a critical frame,
 * flow override bits are set to classify the frame into HW's high priority
 * queue. The HW will pick pre-configured min rates for such packets.
 *
 * Return: None
 */
static void
dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
					uint32_t *hal_tx_desc,
					qdf_nbuf_t nbuf)
{
	/*
	 * Critical frames should be queued to the high priority queue for
	 * the TID on which they are sent out (for the concerned peer).
	 * FW is using HTT_MSDU_Q_IDX 2 for HOL (high priority) queue.
	 * htt_msdu_idx = (2 * who_classify_info_sel) + flow_override
	 * Hence, using who_classify_info_sel = 1, flow_override = 0 to
	 * select the HOL queue.
	 */
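	/*
	 * Worked example of the mapping above: who_classify_info_sel = 1 and
	 * flow_override = 0 give htt_msdu_idx = (2 * 1) + 0 = 2, the HOL
	 * queue; the ILP path in dp_tx_set_particular_tx_queue() below uses
	 * (2 * 1) + 1 = 3 for TCP ACKs.
	 */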
	if (QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(nbuf)) {
		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
		hal_tx_desc_set_flow_override(hal_tx_desc, 0);
		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
		hal_tx_desc_set_tx_notify_frame(hal_tx_desc,
						TX_SEMI_HARD_NOTIFY_E);
	}
}
#else
static inline void
dp_tx_set_min_rates_for_critical_frames(struct dp_soc *soc,
					uint32_t *hal_tx_desc_cached,
					qdf_nbuf_t nbuf)
{
}
#endif
#ifdef DP_TX_PACKET_INSPECT_FOR_ILP
/**
 * dp_tx_set_particular_tx_queue() - set particular TX TQM flow queue 3 for
 *				     TX packets, currently TCP ACK only
 * @soc: DP soc structure pointer
 * @hal_tx_desc: HAL descriptor where fields are set
 * @nbuf: skb to be considered for particular TX queue
 *
 * Return: None
 */
static inline
void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
				   uint32_t *hal_tx_desc,
				   qdf_nbuf_t nbuf)
{
	if (!soc->tx_ilp_enable)
		return;

	if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
			 QDF_NBUF_CB_PACKET_TYPE_TCP_ACK)) {
		hal_tx_desc_set_flow_override_enable(hal_tx_desc, 1);
		hal_tx_desc_set_flow_override(hal_tx_desc, 1);
		hal_tx_desc_set_who_classify_info_sel(hal_tx_desc, 1);
	}
}
#else
static inline
void dp_tx_set_particular_tx_queue(struct dp_soc *soc,
				   uint32_t *hal_tx_desc,
				   qdf_nbuf_t nbuf)
{
}
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_MCAST_MLO)
#ifdef QCA_MULTIPASS_SUPPORT
/**
 * dp_tx_mlo_mcast_multipass_lookup() - lookup vlan_id in mpass peer list
 * @be_vdev: Handle to DP be_vdev structure
 * @ptnr_vdev: DP ptnr_vdev handle
 * @arg: pointer to dp_mlo_mpass_buf
 *
 * Return: None
 */
static void
dp_tx_mlo_mcast_multipass_lookup(struct dp_vdev_be *be_vdev,
				 struct dp_vdev *ptnr_vdev,
				 void *arg)
{
	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
	struct dp_txrx_peer *txrx_peer = NULL;
	struct vlan_ethhdr *veh = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(ptr->nbuf);
	uint16_t vlan_id = 0;
	bool not_vlan = ((ptnr_vdev->tx_encap_type == htt_cmn_pkt_type_raw) ||
			 (htons(eh->ether_type) != ETH_P_8021Q));

	if (qdf_unlikely(not_vlan))
		return;

	veh = (struct vlan_ethhdr *)eh;
	vlan_id = (ntohs(veh->h_vlan_TCI) & VLAN_VID_MASK);

	qdf_spin_lock_bh(&ptnr_vdev->mpass_peer_mutex);
	TAILQ_FOREACH(txrx_peer, &ptnr_vdev->mpass_peer_list,
		      mpass_peer_list_elem) {
		if (vlan_id == txrx_peer->vlan_id) {
			qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
			ptr->vlan_id = vlan_id;
			return;
		}
	}
	qdf_spin_unlock_bh(&ptnr_vdev->mpass_peer_mutex);
}
/**
 * dp_tx_mlo_mcast_multipass_send() - send multipass MLO Mcast packets
 * @be_vdev: Handle to DP be_vdev structure
 * @ptnr_vdev: DP ptnr_vdev handle
 * @arg: pointer to dp_mlo_mpass_buf
 *
 * Return: None
 */
static void
dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
			       struct dp_vdev *ptnr_vdev,
			       void *arg)
{
	struct dp_mlo_mpass_buf *ptr = (struct dp_mlo_mpass_buf *)arg;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev_be *be_ptnr_vdev = NULL;
	qdf_nbuf_t nbuf_clone;
	uint16_t group_key = 0;

	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
	if (be_vdev != be_ptnr_vdev) {
		nbuf_clone = qdf_nbuf_clone(ptr->nbuf);
		if (qdf_unlikely(!nbuf_clone)) {
			dp_tx_debug("nbuf clone failed");
			return;
		}
	} else {
		nbuf_clone = ptr->nbuf;
	}

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;

	if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
		msdu_info.tid = HTT_TX_EXT_TID_INVALID;
		HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(
						msdu_info.meta_data[0], 1);
	} else {
		/* return when vlan map is not initialized */
		if (!ptnr_vdev->iv_vlan_map)
			return;
		group_key = ptnr_vdev->iv_vlan_map[ptr->vlan_id];

		/*
		 * If group key is not installed, drop the frame.
		 */
		if (!group_key)
			return;

		dp_tx_remove_vlan_tag(ptnr_vdev, nbuf_clone);
		dp_tx_add_groupkey_metadata(ptnr_vdev, &msdu_info, group_key);
		msdu_info.exception_fw = 1;
	}

	nbuf_clone = dp_tx_send_msdu_single(
					ptnr_vdev,
					nbuf_clone,
					&msdu_info,
					DP_MLO_MCAST_REINJECT_PEER_ID,
					NULL);
	if (qdf_unlikely(nbuf_clone)) {
		dp_info("pkt send failed");
		qdf_nbuf_free(nbuf_clone);
		return;
	}
}
/**
 * dp_tx_mlo_mcast_multipass_handler() - handle a frame that may need
 *					 multipass processing
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @nbuf: nbuf to be enqueued
 *
 * Return: true if handling is done else false
 */
static bool
dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  qdf_nbuf_t nbuf)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_mlo_mpass_buf mpass_buf;

	memset(&mpass_buf, 0, sizeof(struct dp_mlo_mpass_buf));
	mpass_buf.vlan_id = INVALID_VLAN_ID;
	mpass_buf.nbuf = nbuf;

	dp_tx_mlo_mcast_multipass_lookup(be_vdev, vdev, &mpass_buf);
	if (mpass_buf.vlan_id == INVALID_VLAN_ID) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_tx_mlo_mcast_multipass_lookup,
				      &mpass_buf, DP_MOD_ID_TX,
				      DP_ALL_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
		/*
		 * Do not drop the frame when vlan_id doesn't match.
		 * Send the frame as it is.
		 */
		if (mpass_buf.vlan_id == INVALID_VLAN_ID)
			return false;
	}

	/*
	 * AP can have classic clients, special clients & classic repeaters.
	 * 1. Classic clients & special client:
	 *    Remove vlan header, find corresponding group key index, fill
	 *    in metaheader and enqueue multicast frame to TCL.
	 * 2. Classic repeater:
	 *    Pass through to classic repeater with vlan tag intact without
	 *    any group key index. Hardware will know which key to use to
	 *    send frame to repeater.
	 */
	nbuf_copy = qdf_nbuf_copy(nbuf);

	/*
	 * Send multicast frame to special peers even
	 * if pass through to classic repeater fails.
	 */
	if (nbuf_copy) {
		struct dp_mlo_mpass_buf mpass_buf_copy = {0};

		mpass_buf_copy.vlan_id = MULTIPASS_WITH_VLAN_ID;
		mpass_buf_copy.nbuf = nbuf_copy;
		/* send frame on partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_tx_mlo_mcast_multipass_send,
				      &mpass_buf_copy, DP_MOD_ID_TX,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);

		/* send frame on mcast primary vdev */
		dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf_copy);

		if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
			be_vdev->mlo_dev_ctxt->seq_num = 0;
		else
			be_vdev->mlo_dev_ctxt->seq_num++;
	}

	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_tx_mlo_mcast_multipass_send,
			      &mpass_buf, DP_MOD_ID_TX, DP_LINK_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);
	dp_tx_mlo_mcast_multipass_send(be_vdev, vdev, &mpass_buf);

	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
		be_vdev->mlo_dev_ctxt->seq_num = 0;
	else
		be_vdev->mlo_dev_ctxt->seq_num++;

	return true;
}
#else
static bool
dp_tx_mlo_mcast_multipass_handler(struct dp_soc *soc, struct dp_vdev *vdev,
				  qdf_nbuf_t nbuf)
{
	return false;
}
#endif
void
dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
			 struct dp_vdev *ptnr_vdev,
			 void *arg)
{
	qdf_nbuf_t nbuf = (qdf_nbuf_t)arg;
	qdf_nbuf_t nbuf_clone;
	struct dp_vdev_be *be_ptnr_vdev = NULL;
	struct dp_tx_msdu_info_s msdu_info;

	be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
	if (be_vdev != be_ptnr_vdev) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(!nbuf_clone)) {
			dp_tx_debug("nbuf clone failed");
			return;
		}
	} else {
		nbuf_clone = nbuf;
	}

	/*
	 * NAWDS clients accept MCAST packets only in 4-address format.
	 * This ensures packets are sent in 4-address format to NAWDS
	 * clients.
	 */
	if (qdf_unlikely(ptnr_vdev->nawds_enabled)) {
		qdf_mem_zero(&msdu_info, sizeof(msdu_info));
		dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
		dp_tx_nawds_handler(ptnr_vdev->pdev->soc, ptnr_vdev,
				    &msdu_info, nbuf_clone, DP_INVALID_PEER);
	}

	if (qdf_unlikely(dp_tx_proxy_arp(ptnr_vdev, nbuf_clone) !=
			 QDF_STATUS_SUCCESS)) {
		qdf_nbuf_free(nbuf_clone);
		return;
	}

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
	msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;

	DP_STATS_INC(ptnr_vdev, tx_i.mlo_mcast.send_pkt_count, 1);
	nbuf_clone = dp_tx_send_msdu_single(
					ptnr_vdev,
					nbuf_clone,
					&msdu_info,
					DP_MLO_MCAST_REINJECT_PEER_ID,
					NULL);
	if (qdf_unlikely(nbuf_clone)) {
		DP_STATS_INC(ptnr_vdev, tx_i.mlo_mcast.fail_pkt_count, 1);
		dp_info("pkt send failed");
		qdf_nbuf_free(nbuf_clone);
		return;
	}
}
static inline void
dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
			      struct dp_vdev *vdev,
			      struct dp_tx_msdu_info_s *msdu_info)
{
	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, msdu_info->vdev_id);
}

void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
				struct dp_vdev *vdev,
				qdf_nbuf_t nbuf)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (qdf_unlikely(vdev->multipass_en) &&
	    dp_tx_mlo_mcast_multipass_handler(soc, vdev, nbuf))
		return;
	/* send frame on partner vdevs */
	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_tx_mlo_mcast_pkt_send,
			      nbuf, DP_MOD_ID_REINJECT, DP_LINK_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);

	/* send frame on mcast primary vdev */
	dp_tx_mlo_mcast_pkt_send(be_vdev, vdev, nbuf);

	if (qdf_unlikely(be_vdev->mlo_dev_ctxt->seq_num > MAX_GSN_NUM))
		be_vdev->mlo_dev_ctxt->seq_num = 0;
	else
		be_vdev->mlo_dev_ctxt->seq_num++;
}
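/*
 * Note on the sequence number handling above: each copy of a given MLO
 * multicast frame is sent with the same msdu_info.gsn, and the 12-bit
 * counter (MAX_GSN_NUM is 0x0FFF) is advanced exactly once per original
 * frame, wrapping back to 0 on overflow, so all links share one global
 * sequence number per frame.
 */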
bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
				   struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	if (be_vdev->mcast_primary)
		return true;

	return false;
}
#if defined(CONFIG_MLO_SINGLE_DEV)
static void
dp_tx_mlo_mcast_enhance_be(struct dp_vdev_be *be_vdev,
			   struct dp_vdev *ptnr_vdev,
			   void *arg)
{
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;
	qdf_nbuf_t nbuf = (qdf_nbuf_t)arg;

	if (vdev == ptnr_vdev)
		return;

	/*
	 * Hold a reference to avoid the free of nbuf inside
	 * dp_tx_mcast_enhance() in case of successful conversion.
	 */
	qdf_nbuf_ref(nbuf);

	if (qdf_unlikely(!dp_tx_mcast_enhance(ptnr_vdev, nbuf)))
		return;

	qdf_nbuf_free(nbuf);
}

qdf_nbuf_t
dp_tx_mlo_mcast_send_be(struct dp_soc *soc, struct dp_vdev *vdev,
			qdf_nbuf_t nbuf,
			struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_vdev_be *be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!tx_exc_metadata->is_mlo_mcast)
		return nbuf;

	if (!be_vdev->mcast_primary) {
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	/*
	 * In the single netdev model, avoid the reinjection path, as the
	 * mcast packet is identified in the upper layers during the peer
	 * search that finds the primary TQM based on the dest mac addr.
	 *
	 * The new bonding interface is added into the bridge, so MCSD will
	 * update the snooping table and the wifi driver populates the
	 * entries in the appropriate child net devices.
	 */
	if (vdev->mcast_enhancement_en) {
		/*
		 * As dp_tx_mcast_enhance() can consume the nbuf in case of
		 * successful conversion, hold a reference to the nbuf.
		 *
		 * Hold the reference to tx on partner links
		 */
		qdf_nbuf_ref(nbuf);
		if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf))) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_tx_mlo_mcast_enhance_be,
					      nbuf, DP_MOD_ID_TX,
					      DP_ALL_VDEV_ITER,
					      DP_VDEV_ITERATE_SKIP_SELF);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		/* release reference taken above */
		qdf_nbuf_free(nbuf);
	}

	dp_tx_mlo_mcast_handler_be(soc, vdev, nbuf);
	return NULL;
}
#endif
#else
static inline void
dp_tx_vdev_id_set_hal_tx_desc(uint32_t *hal_tx_desc_cached,
			      struct dp_vdev *vdev,
			      struct dp_tx_msdu_info_s *msdu_info)
{
	hal_tx_desc_set_vdev_id(hal_tx_desc_cached, vdev->vdev_id);
}
#endif
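/*
 * Note: the MLO build of dp_tx_vdev_id_set_hal_tx_desc() programs
 * msdu_info->vdev_id (the partner vdev chosen for this copy of the frame),
 * while the non-MLO build above simply uses vdev->vdev_id.
 */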
#if defined(WLAN_FEATURE_11BE_MLO) && !defined(WLAN_MLO_MULTI_CHIP) && \
	!defined(WLAN_MCAST_MLO)
void dp_tx_mlo_mcast_handler_be(struct dp_soc *soc,
				struct dp_vdev *vdev,
				qdf_nbuf_t nbuf)
{
}

bool dp_tx_mlo_is_mcast_primary_be(struct dp_soc *soc,
				   struct dp_vdev *vdev)
{
	return false;
}
#endif
#ifdef CONFIG_SAWF
/**
 * dp_sawf_config_be() - Configure sawf specific fields in tcl
 * @soc: DP soc handle
 * @hal_tx_desc_cached: tx descriptor
 * @fw_metadata: firmware metadata
 * @nbuf: skb buffer
 * @msdu_info: msdu info
 *
 * Return: void
 */
void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t q_id = 0;

	if (!wlan_cfg_get_sawf_config(soc->wlan_cfg_ctx))
		return;

	q_id = dp_sawf_queue_id_get(nbuf);

	if (q_id == DP_SAWF_DEFAULT_Q_INVALID)
		return;

	msdu_info->tid = (q_id & (CDP_DATA_TID_MAX - 1));
	hal_tx_desc_set_hlos_tid(hal_tx_desc_cached,
				 (q_id & (CDP_DATA_TID_MAX - 1)));

	if ((q_id >= DP_SAWF_DEFAULT_QUEUE_MIN) &&
	    (q_id < DP_SAWF_DEFAULT_QUEUE_MAX))
		return;

	dp_sawf_tcl_cmd(fw_metadata, nbuf);
	hal_tx_desc_set_flow_override_enable(hal_tx_desc_cached,
					     DP_TX_FLOW_OVERRIDE_ENABLE);
	hal_tx_desc_set_flow_override(hal_tx_desc_cached,
				      DP_TX_FLOW_OVERRIDE_GET(q_id));
	hal_tx_desc_set_who_classify_info_sel(hal_tx_desc_cached,
					      DP_TX_WHO_CLFY_INF_SEL_GET(q_id));
}
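/*
 * Reading the q_id layout above: the low bits select the HLOS TID
 * (q_id & (CDP_DATA_TID_MAX - 1)), while DP_TX_FLOW_OVERRIDE_GET() and
 * DP_TX_WHO_CLFY_INF_SEL_GET() derive the flow-override and
 * who_classify_info_sel fields from the same queue id; queue ids in
 * [DP_SAWF_DEFAULT_QUEUE_MIN, DP_SAWF_DEFAULT_QUEUE_MAX) keep the
 * default flow classification.
 */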
#else
static inline
void dp_sawf_config_be(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
		       uint16_t *fw_metadata, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info)
{
}

static inline
QDF_STATUS dp_sawf_tx_enqueue_peer_stats(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_sawf_tx_enqueue_fail_peer_stats(struct dp_soc *soc,
					      struct dp_tx_desc_s *tx_desc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_ppeds_stats() - Account for fw2wbm_tx_drop drops in the Tx path
 * @soc: Handle to DP Soc structure
 * @peer_id: Peer ID in the descriptor
 *
 * Return: None
 */
static inline
void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_vdev *vdev = NULL;
	struct dp_txrx_peer *txrx_peer = NULL;
	dp_txrx_ref_handle txrx_ref_handle = NULL;

	DP_STATS_INC(soc, tx.fw2wbm_tx_drop, 1);
	txrx_peer = dp_txrx_peer_get_ref_by_id(soc,
					       peer_id,
					       &txrx_ref_handle,
					       DP_MOD_ID_TX_COMP);
	if (txrx_peer) {
		vdev = txrx_peer->vdev;
		DP_STATS_INC(vdev, tx_i.dropped.fw2wbm_tx_drop, 1);
		dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
	}
}
int dp_ppeds_tx_comp_handler(struct dp_soc_be *be_soc, uint32_t quota)
{
	uint32_t num_avail_for_reap = 0;
	void *tx_comp_hal_desc;
	uint8_t buf_src, status = 0;
	uint32_t count = 0;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	struct dp_soc *soc = &be_soc->soc;
	void *last_prefetch_hw_desc = NULL;
	struct dp_tx_desc_s *last_prefetch_sw_desc = NULL;
	qdf_nbuf_t nbuf;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	hal_ring_handle_t hal_ring_hdl =
				be_soc->ppeds_wbm_release_ring.hal_srng;
	struct dp_txrx_peer *txrx_peer = NULL;
	uint16_t peer_id = CDP_INVALID_PEER;
	dp_txrx_ref_handle txrx_ref_handle = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct dp_srng *srng;

	if (qdf_unlikely(dp_srng_access_start(NULL, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

	last_prefetch_hw_desc = dp_srng_dst_prefetch(hal_soc, hal_ring_hdl,
						     num_avail_for_reap);

	srng = &be_soc->ppeds_wbm_release_ring;

	if (srng) {
		hal_update_ring_util(soc->hal_soc, srng->hal_srng,
				     WBM2SW_RELEASE,
				     &be_soc->ppeds_wbm_release_ring.stats);
	}

	while (qdf_likely(num_avail_for_reap--)) {
		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;

		buf_src = hal_tx_comp_get_buffer_source(hal_soc,
							tx_comp_hal_desc);

		if (qdf_unlikely(buf_src != HAL_TX_COMP_RELEASE_SOURCE_TQM &&
				 buf_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			dp_err("Tx comp release_src != TQM | FW but from %d",
			       buf_src);
			dp_assert_always_internal_ds_stat(0, be_soc,
							  tx.tx_comp_buf_src);
			continue;
		}

		dp_tx_comp_get_params_from_hal_desc_be(soc, tx_comp_hal_desc,
						       &tx_desc);

		if (!tx_desc) {
			dp_err("unable to retrieve tx_desc!");
			dp_assert_always_internal_ds_stat(0, be_soc,
							  tx.tx_comp_desc_null);
			continue;
		}

		if (qdf_unlikely(!(tx_desc->flags &
				   DP_TX_DESC_FLAG_ALLOCATED) ||
				 !(tx_desc->flags & DP_TX_DESC_FLAG_PPEDS))) {
			dp_assert_always_internal_ds_stat(0, be_soc,
						tx.tx_comp_invalid_flag);
			continue;
		}

		tx_desc->buffer_src = buf_src;

		if (qdf_unlikely(buf_src == HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			status = hal_tx_comp_get_tx_status(tx_comp_hal_desc);
			if (status != HTT_TX_FW2WBM_TX_STATUS_OK)
				dp_ppeds_stats(soc, tx_desc->peer_id);

			nbuf = dp_ppeds_tx_desc_free(soc, tx_desc);
			qdf_nbuf_free(nbuf);
		} else {
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);

			/*
			 * Add desc sync to account for extended statistics
			 * during Tx completion.
			 */
			if (peer_id != tx_desc->peer_id) {
				if (txrx_peer) {
					dp_txrx_peer_unref_delete(
							txrx_ref_handle,
							DP_MOD_ID_TX_COMP);
					txrx_peer = NULL;
					vdev = NULL;
					pdev = NULL;
				}
				peer_id = tx_desc->peer_id;
				txrx_peer =
					dp_txrx_peer_get_ref_by_id(
							soc, peer_id,
							&txrx_ref_handle,
							DP_MOD_ID_TX_COMP);
				if (txrx_peer) {
					vdev = txrx_peer->vdev;
					if (!vdev)
						goto next_desc;

					pdev = vdev->pdev;
					if (!pdev)
						goto next_desc;

					dp_tx_desc_update_fast_comp_flag(soc,
							tx_desc,
							!pdev->enhanced_stats_en);
					if (pdev->enhanced_stats_en) {
						hal_tx_comp_desc_sync(
							tx_comp_hal_desc,
							&tx_desc->comp, 1);
					}
				}
			} else if (txrx_peer && vdev && pdev) {
				dp_tx_desc_update_fast_comp_flag(soc,
						tx_desc,
						!pdev->enhanced_stats_en);
				if (pdev->enhanced_stats_en) {
					hal_tx_comp_desc_sync(
							tx_comp_hal_desc,
							&tx_desc->comp, 1);
				}
			}
next_desc:
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			count++;

			dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
						       num_avail_for_reap,
						       hal_ring_hdl,
						       &last_prefetch_hw_desc,
						       &last_prefetch_sw_desc);
		}
	}

	dp_srng_access_end(NULL, soc, hal_ring_hdl);

	if (txrx_peer)
		dp_txrx_peer_unref_delete(txrx_ref_handle,
					  DP_MOD_ID_TX_COMP);
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc,
					     CDP_MAX_TX_COMP_PPE_RING);

	return count;
}
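/*
 * Note on the reap loop above: the (peer_id, txrx_peer, vdev, pdev) tuple
 * is cached across consecutive descriptors so a peer reference lookup is
 * only done when the completing peer changes, and TQM-released descriptors
 * are batched into a singly linked list that is handed to
 * dp_tx_comp_process_desc_list() after ring access ends.
 */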
#endif
#if defined(QCA_SUPPORT_WDS_EXTENDED)
static inline void
dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
			     struct cdp_tx_exception_metadata *tx_exc_metadata,
			     uint16_t *ast_idx, uint16_t *ast_hash)
{
	struct dp_peer *peer = NULL;

	if (tx_exc_metadata->is_wds_extended) {
		peer = dp_peer_get_ref_by_id(soc, tx_exc_metadata->peer_id,
					     DP_MOD_ID_TX);
		if (peer) {
			*ast_idx = peer->ast_idx;
			*ast_hash = peer->ast_hash;
			hal_tx_desc_set_index_lookup_override
							(soc->hal_soc,
							 hal_tx_desc_cached,
							 0x1);
			dp_peer_unref_delete(peer, DP_MOD_ID_TX);
		}
	} else {
		return;
	}
}
#else
static inline void
dp_get_peer_from_tx_exc_meta(struct dp_soc *soc, uint32_t *hal_tx_desc_cached,
			     struct cdp_tx_exception_metadata *tx_exc_metadata,
			     uint16_t *ast_idx, uint16_t *ast_hash)
{
}
#endif
QDF_STATUS
dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
		    struct cdp_tx_exception_metadata *tx_exc_metadata,
		    struct dp_tx_msdu_info_s *msdu_info)
{
	void *hal_tx_desc;
	uint32_t *hal_tx_desc_cached;
	int coalesce = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	uint8_t ring_id = tx_q->ring_id;
	uint8_t tid;
	struct dp_vdev_be *be_vdev;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
	hal_ring_handle_t hal_ring_hdl = NULL;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
	uint16_t ast_idx = vdev->bss_ast_idx;
	uint16_t ast_hash = vdev->bss_ast_hash;

	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	if (qdf_unlikely(tx_exc_metadata)) {
		qdf_assert_always((tx_exc_metadata->tx_encap_type ==
				   CDP_INVALID_TX_ENCAP_TYPE) ||
				  (tx_exc_metadata->tx_encap_type ==
				   vdev->tx_encap_type));

		if (tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)
			qdf_assert_always((tx_exc_metadata->sec_type ==
					   CDP_INVALID_SEC_TYPE) ||
					  tx_exc_metadata->sec_type ==
					  vdev->sec_type);
		dp_get_peer_from_tx_exc_meta(soc, (void *)cached_desc,
					     tx_exc_metadata,
					     &ast_idx, &ast_hash);
	}

	hal_tx_desc_cached = (void *)cached_desc;

	if (dp_sawf_tag_valid_get(tx_desc->nbuf)) {
		dp_sawf_config_be(soc, hal_tx_desc_cached,
				  &fw_metadata, tx_desc->nbuf, msdu_info);
		dp_sawf_tx_enqueue_peer_stats(soc, tx_desc);
	}

	hal_tx_desc_set_buf_addr_be(soc->hal_soc, hal_tx_desc_cached,
				    tx_desc->dma_addr, bm_id, tx_desc->id,
				    (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
	hal_tx_desc_set_lmac_id_be(soc->hal_soc, hal_tx_desc_cached,
				   vdev->lmac_id);

	hal_tx_desc_set_search_index_be(soc->hal_soc, hal_tx_desc_cached,
					ast_idx);
	/*
	 * Bank_ID is used as the DSCP_TABLE number in Beryllium,
	 * so there is no explicit field for DSCP_TID_TABLE_NUM.
	 */
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) ==
				   QDF_NBUF_TX_CKSUM_TCP_UDP) ||
	    qdf_nbuf_is_tso(tx_desc->nbuf)) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);

	dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);

	tid = msdu_info->tid;
	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
						tx_desc->nbuf);
	dp_tx_set_particular_tx_queue(soc, hal_tx_desc_cached,
				      tx_desc->nbuf);
	dp_tx_desc_set_ktimestamp(vdev, tx_desc);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		return status;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);

	/* Sync cached descriptor with HW */
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);

	coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
					    msdu_info, ring_id);

	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, dp_tx_get_pkt_len(tx_desc));
	DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
	dp_tx_update_stats(soc, tx_desc, ring_id);
	status = QDF_STATUS_SUCCESS;

	dp_tx_hw_desc_update_evt((uint8_t *)hal_tx_desc_cached,
				 hal_ring_hdl, soc, ring_id);

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_EXIT,
			     qdf_get_log_timestamp(), tx_desc->nbuf);
	return status;
}
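/*
 * The enqueue sequence above: build the TCL descriptor in a cached stack
 * copy, claim a slot in the TCL SRNG, sync the cached copy into the HW
 * descriptor with hal_tx_desc_sync(), then update stats and let
 * dp_tx_ring_access_end_wrapper() decide, via the coalesce result from
 * dp_tx_attempt_coalescing(), whether the ring update can be deferred and
 * merged with later enqueues.
 */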
#ifdef IPA_OFFLOAD
static void
dp_tx_get_ipa_bank_config(struct dp_soc_be *be_soc,
			  union hal_tx_bank_config *bank_config)
{
	bank_config->epd = 0;
	bank_config->encap_type = wlan_cfg_pkt_type(be_soc->soc.wlan_cfg_ctx);
	bank_config->encrypt_type = 0;

	bank_config->src_buffer_swap = 0;
	bank_config->link_meta_swap = 0;
	bank_config->index_lookup_enable = 0;
	bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
	bank_config->addrx_en = 1;
	bank_config->addry_en = 1;

	bank_config->mesh_enable = 0;
	bank_config->dscp_tid_map_id = 0;
	bank_config->vdev_id_check_en = 0;
	bank_config->pmac_id = 0;
}

static void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
{
	union hal_tx_bank_config ipa_config = {0};
	int bid;

	if (!wlan_cfg_is_ipa_enabled(be_soc->soc.wlan_cfg_ctx)) {
		be_soc->ipa_bank_id = DP_BE_INVALID_BANK_ID;
		return;
	}

	dp_tx_get_ipa_bank_config(be_soc, &ipa_config);

	/* Let IPA use last HOST owned bank */
	bid = be_soc->num_bank_profiles - 1;

	be_soc->bank_profiles[bid].is_configured = true;
	be_soc->bank_profiles[bid].bank_config.val = ipa_config.val;
	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
				      &be_soc->bank_profiles[bid].bank_config,
				      bid);
	qdf_atomic_inc(&be_soc->bank_profiles[bid].ref_count);

	dp_info("IPA bank at slot %d config:0x%x", bid,
		be_soc->bank_profiles[bid].bank_config.val);

	be_soc->ipa_bank_id = bid;
}
#else /* !IPA_OFFLOAD */
static inline void dp_tx_init_ipa_bank_profile(struct dp_soc_be *be_soc)
{
}
#endif /* IPA_OFFLOAD */
QDF_STATUS dp_tx_init_bank_profiles(struct dp_soc_be *be_soc)
{
	int i, num_tcl_banks;

	num_tcl_banks = hal_tx_get_num_tcl_banks(be_soc->soc.hal_soc);

	dp_assert_always_internal(num_tcl_banks);
	be_soc->num_bank_profiles = num_tcl_banks;

	be_soc->bank_profiles = qdf_mem_malloc(num_tcl_banks *
					       sizeof(*be_soc->bank_profiles));
	if (!be_soc->bank_profiles) {
		dp_err("unable to allocate memory for DP TX Profiles!");
		return QDF_STATUS_E_NOMEM;
	}

	DP_TX_BANK_LOCK_CREATE(&be_soc->tx_bank_lock);

	for (i = 0; i < num_tcl_banks; i++) {
		be_soc->bank_profiles[i].is_configured = false;
		qdf_atomic_init(&be_soc->bank_profiles[i].ref_count);
	}
	dp_info("initialized %u bank profiles", be_soc->num_bank_profiles);

	dp_tx_init_ipa_bank_profile(be_soc);

	return QDF_STATUS_SUCCESS;
}

void dp_tx_deinit_bank_profiles(struct dp_soc_be *be_soc)
{
	qdf_mem_free(be_soc->bank_profiles);
	DP_TX_BANK_LOCK_DESTROY(&be_soc->tx_bank_lock);
}
static
void dp_tx_get_vdev_bank_config(struct dp_vdev_be *be_vdev,
				union hal_tx_bank_config *bank_config)
{
	struct dp_vdev *vdev = &be_vdev->vdev;

	bank_config->epd = 0;

	bank_config->encap_type = vdev->tx_encap_type;

	/* Only valid for raw frames. Needs work for RAW mode */
	if (vdev->tx_encap_type == htt_cmn_pkt_type_raw) {
		bank_config->encrypt_type = sec_type_map[vdev->sec_type];
	} else {
		bank_config->encrypt_type = 0;
	}

	bank_config->src_buffer_swap = 0;
	bank_config->link_meta_swap = 0;

	if ((vdev->search_type == HAL_TX_ADDR_INDEX_SEARCH) &&
	    vdev->opmode == wlan_op_mode_sta) {
		bank_config->index_lookup_enable = 1;
		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_MEC_NOTIFY;
		bank_config->addrx_en = 0;
		bank_config->addry_en = 0;
	} else {
		bank_config->index_lookup_enable = 0;
		bank_config->mcast_pkt_ctrl = HAL_TX_MCAST_CTRL_FW_EXCEPTION;
		bank_config->addrx_en =
			(vdev->hal_desc_addr_search_flags &
			 HAL_TX_DESC_ADDRX_EN) ? 1 : 0;
		bank_config->addry_en =
			(vdev->hal_desc_addr_search_flags &
			 HAL_TX_DESC_ADDRY_EN) ? 1 : 0;
	}

	bank_config->mesh_enable = vdev->mesh_vdev ? 1 : 0;

	bank_config->dscp_tid_map_id = vdev->dscp_tid_map_id;

	/* Disabling vdev id check for now. Needs revisit. */
	bank_config->vdev_id_check_en = be_vdev->vdev_id_check_en;

	bank_config->pmac_id = vdev->lmac_id;
}
int dp_tx_get_bank_profile(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev)
{
	char *temp_str = "";
	bool found_match = false;
	int bank_id = DP_BE_INVALID_BANK_ID;
	int i;
	int unconfigured_slot = DP_BE_INVALID_BANK_ID;
	int zero_ref_count_slot = DP_BE_INVALID_BANK_ID;
	union hal_tx_bank_config vdev_config = {0};

	/* convert vdev params into hal_tx_bank_config */
	dp_tx_get_vdev_bank_config(be_vdev, &vdev_config);

	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);

	/* go over all banks and find a matching/unconfigured/unused bank */
	for (i = 0; i < be_soc->num_bank_profiles; i++) {
		if (be_soc->bank_profiles[i].is_configured &&
		    (be_soc->bank_profiles[i].bank_config.val ^
		     vdev_config.val) == 0) {
			found_match = true;
			break;
		}

		if (unconfigured_slot == DP_BE_INVALID_BANK_ID &&
		    !be_soc->bank_profiles[i].is_configured)
			unconfigured_slot = i;
		else if (zero_ref_count_slot == DP_BE_INVALID_BANK_ID &&
			 !qdf_atomic_read(&be_soc->bank_profiles[i].ref_count))
			zero_ref_count_slot = i;
	}

	if (found_match) {
		temp_str = "matching";
		bank_id = i;
		goto inc_ref_and_return;
	}

	if (unconfigured_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "unconfigured";
		bank_id = unconfigured_slot;
		goto configure_and_return;
	}

	if (zero_ref_count_slot != DP_BE_INVALID_BANK_ID) {
		temp_str = "zero_ref_count";
		bank_id = zero_ref_count_slot;
		goto configure_and_return;
	}

	if (bank_id == DP_BE_INVALID_BANK_ID) {
		dp_alert("unable to find TX bank!");
		QDF_BUG(0);
		/* drop the lock before bailing out on the error path */
		DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
		return bank_id;
	}

configure_and_return:
	be_soc->bank_profiles[bank_id].is_configured = true;
	be_soc->bank_profiles[bank_id].bank_config.val = vdev_config.val;
	hal_tx_populate_bank_register(be_soc->soc.hal_soc,
				      &be_soc->bank_profiles[bank_id].bank_config,
				      bank_id);
inc_ref_and_return:
	qdf_atomic_inc(&be_soc->bank_profiles[bank_id].ref_count);
	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);

	dp_info("found %s slot at index %d, input:0x%x match:0x%x ref_count %u",
		temp_str, bank_id, vdev_config.val,
		be_soc->bank_profiles[bank_id].bank_config.val,
		qdf_atomic_read(&be_soc->bank_profiles[bank_id].ref_count));

	dp_info("epd:%x encap:%x encryp:%x src_buf_swap:%x link_meta_swap:%x addrx_en:%x addry_en:%x mesh_en:%x vdev_id_check:%x pmac_id:%x mcast_pkt_ctrl:%x",
		be_soc->bank_profiles[bank_id].bank_config.epd,
		be_soc->bank_profiles[bank_id].bank_config.encap_type,
		be_soc->bank_profiles[bank_id].bank_config.encrypt_type,
		be_soc->bank_profiles[bank_id].bank_config.src_buffer_swap,
		be_soc->bank_profiles[bank_id].bank_config.link_meta_swap,
		be_soc->bank_profiles[bank_id].bank_config.addrx_en,
		be_soc->bank_profiles[bank_id].bank_config.addry_en,
		be_soc->bank_profiles[bank_id].bank_config.mesh_enable,
		be_soc->bank_profiles[bank_id].bank_config.vdev_id_check_en,
		be_soc->bank_profiles[bank_id].bank_config.pmac_id,
		be_soc->bank_profiles[bank_id].bank_config.mcast_pkt_ctrl);

	return bank_id;
}

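/**
 * dp_tx_put_bank_profile() - Drop a vdev's reference on its TCL bank
 * @be_soc: BE soc handle
 * @be_vdev: BE vdev handle
 *
 * The slot stays configured; a zero ref_count only makes it eligible for
 * reuse by a later dp_tx_get_bank_profile() call.
 */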
void dp_tx_put_bank_profile(struct dp_soc_be *be_soc,
			    struct dp_vdev_be *be_vdev)
{
	DP_TX_BANK_LOCK_ACQUIRE(&be_soc->tx_bank_lock);
	qdf_atomic_dec(&be_soc->bank_profiles[be_vdev->bank_id].ref_count);
	DP_TX_BANK_LOCK_RELEASE(&be_soc->tx_bank_lock);
}

void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
			       struct dp_vdev_be *be_vdev)
{
	dp_tx_put_bank_profile(be_soc, be_vdev);
	be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
	be_vdev->vdev.bank_id = be_vdev->bank_id;
}

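/**
 * dp_tx_desc_pool_init_be() - Populate cookie-conversion entries for a TX
 *	descriptor pool
 * @soc: DP soc handle
 * @num_elem: number of descriptors in the pool
 * @pool_id: pool id
 * @spcl_tx_desc: true when initializing the special TX descriptor pool
 *
 * Walks the pool freelist and writes each descriptor's VA into a secondary
 * page table (SPT) entry, then derives the descriptor's cookie id from the
 * (page table index, entry index) pair so that HW completions can be
 * converted back to SW descriptors without a lookup.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if num_elem is 0
 */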
QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
				   uint32_t num_elem,
				   uint8_t pool_id,
				   bool spcl_tx_desc)
{
	struct dp_tx_desc_pool_s *tx_desc_pool;
	struct dp_hw_cookie_conversion_t *cc_ctx;
	struct dp_spt_page_desc *page_desc;
	struct dp_tx_desc_s *tx_desc;
	uint32_t ppt_idx = 0;
	uint32_t avail_entry_index = 0;

	if (!num_elem) {
		dp_err("desc_num 0 !!");
		return QDF_STATUS_E_FAILURE;
	}

	if (spcl_tx_desc) {
		tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
		cc_ctx = dp_get_spcl_tx_cookie_t(soc, pool_id);
	} else {
		tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
		cc_ctx = dp_get_tx_cookie_t(soc, pool_id);
	}

	tx_desc = tx_desc_pool->freelist;
	page_desc = &cc_ctx->page_desc_base[0];
	while (tx_desc) {
		if (avail_entry_index == 0) {
			if (ppt_idx >= cc_ctx->total_page_num) {
				dp_alert("insufficient secondary page tables");
				qdf_assert_always(0);
			}
			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
		}

		/* put each TX Desc VA to SPT pages and
		 * get corresponding ID
		 */
		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
					 avail_entry_index,
					 tx_desc);
		tx_desc->id =
			dp_cc_desc_id_generate(page_desc->ppt_index,
					       avail_entry_index);
		tx_desc->pool_id = pool_id;
		dp_tx_desc_set_magic(tx_desc, DP_TX_MAGIC_PATTERN_FREE);
		tx_desc = tx_desc->next;
		avail_entry_index = (avail_entry_index + 1) &
					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
	}

	return QDF_STATUS_SUCCESS;
}

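/**
 * dp_tx_desc_pool_deinit_be() - Clear the cookie-conversion pages backing
 *	a TX descriptor pool
 * @soc: DP soc handle
 * @tx_desc_pool: descriptor pool being torn down
 * @pool_id: pool id
 * @spcl_tx_desc: true when the special TX descriptor pool is targeted
 *
 * Zeroes every secondary page table page so stale VA entries cannot be
 * dereferenced after the pool is freed.
 */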
void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
			       struct dp_tx_desc_pool_s *tx_desc_pool,
			       uint8_t pool_id, bool spcl_tx_desc)
{
	struct dp_spt_page_desc *page_desc;
	int i = 0;
	struct dp_hw_cookie_conversion_t *cc_ctx;

	if (spcl_tx_desc)
		cc_ctx = dp_get_spcl_tx_cookie_t(soc, pool_id);
	else
		cc_ctx = dp_get_tx_cookie_t(soc, pool_id);

	for (i = 0; i < cc_ctx->total_page_num; i++) {
		page_desc = &cc_ctx->page_desc_base[i];
		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
	}
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
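/**
 * dp_tx_comp_nf_handler() - Near-full interrupt handler for the TX
 *	completion ring
 * @int_ctx: interrupt context
 * @soc: DP soc handle
 * @hal_ring_hdl: TX completion ring handle
 * @ring_id: TX completion ring id
 * @quota: remaining work budget (unused in this handler)
 *
 * Flags the ring as near-full so the regular completion path drains it
 * aggressively; does nothing when the fill level is below the threshold.
 *
 * Return: work done (0 or 1)
 */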
uint32_t dp_tx_comp_nf_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			       hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			       uint32_t quota)
{
	struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
	uint32_t work_done = 0;

	if (dp_srng_get_near_full_level(soc, tx_comp_ring) <
			DP_SRNG_THRESH_NEAR_FULL)
		return 0;

	qdf_atomic_set(&tx_comp_ring->near_full, 1);
	work_done++;

	return work_done;
}
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
	defined(WLAN_CONFIG_TX_DELAY)
#define PPDUID_GET_HW_LINK_ID(PPDU_ID, LINK_ID_OFFSET, LINK_ID_BITS) \
	(((PPDU_ID) >> (LINK_ID_OFFSET)) & ((1 << (LINK_ID_BITS)) - 1))

#define HW_TX_DELAY_MAX				0x1000000
#define TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US	10
#define HW_TX_DELAY_MASK			0x1FFFFFFF
#define TX_COMPL_BUFFER_TSTAMP_US(TSTAMP) \
	(((TSTAMP) << TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US) & \
	 HW_TX_DELAY_MASK)

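/*
 * dp_mlo_compute_hw_delay_us() - per-MSDU HW TX delay for MLO.
 *
 * The TQM enqueue timestamp in the completion appears to be reported in
 * 1024 us units, so it is shifted left by TX_COMPL_SHIFT_BUFFER_TIMESTAMP_US
 * to approximate microseconds. Both timestamps are then normalized to the
 * MLO reference by adding the soc-wide TQM offset and the per-link TSF2
 * offset, and the delay is their difference, all confined to the 29-bit
 * HW_TX_DELAY_MASK wrap-around window:
 *
 *   delay = ((tsf + delta_tsf2) -
 *            ((buffer_timestamp << 10) + delta_tqm)) & HW_TX_DELAY_MASK
 *
 * Values above HW_TX_DELAY_MAX are treated as wrap/underflow and rejected.
 */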
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	uint32_t ppdu_id;
	uint8_t link_id_offset, link_id_bits;
	uint8_t hw_link_id;
	uint32_t msdu_tqm_enqueue_tstamp_us, final_msdu_tqm_enqueue_tstamp_us;
	uint32_t msdu_compl_tsf_tstamp_us, final_msdu_compl_tsf_tstamp_us;
	uint32_t delay;
	int32_t delta_tsf2, delta_tqm;

	if (!ts->valid)
		return QDF_STATUS_E_INVAL;

	link_id_offset = soc->link_id_offset;
	link_id_bits = soc->link_id_bits;
	ppdu_id = ts->ppdu_id;
	hw_link_id = PPDUID_GET_HW_LINK_ID(ppdu_id, link_id_offset,
					   link_id_bits);

	msdu_tqm_enqueue_tstamp_us =
		TX_COMPL_BUFFER_TSTAMP_US(ts->buffer_timestamp);
	msdu_compl_tsf_tstamp_us = ts->tsf;

	delta_tsf2 = dp_mlo_get_delta_tsf2_wrt_mlo_offset(soc, hw_link_id);
	delta_tqm = dp_mlo_get_delta_tqm_wrt_mlo_offset(soc);

	final_msdu_tqm_enqueue_tstamp_us = (msdu_tqm_enqueue_tstamp_us +
					    delta_tqm) & HW_TX_DELAY_MASK;
	final_msdu_compl_tsf_tstamp_us = (msdu_compl_tsf_tstamp_us +
					  delta_tsf2) & HW_TX_DELAY_MASK;

	delay = (final_msdu_compl_tsf_tstamp_us -
		 final_msdu_tqm_enqueue_tstamp_us) & HW_TX_DELAY_MASK;

	if (delay > HW_TX_DELAY_MAX)
		return QDF_STATUS_E_FAILURE;

	if (delay_us)
		*delay_us = delay;

	return QDF_STATUS_SUCCESS;
}
#else
static inline
QDF_STATUS dp_mlo_compute_hw_delay_us(struct dp_soc *soc,
				      struct dp_vdev *vdev,
				      struct hal_tx_completion_status *ts,
				      uint32_t *delay_us)
{
	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct hal_tx_completion_status *ts,
				     uint32_t *delay_us)
{
	return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
}

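/*
 * Fast-path nbuf map/unmap: instead of a full qdf_nbuf_map_*() DMA map,
 * dp_tx_nbuf_map_be() cleans only the first 256 bytes of the frame from
 * the CPU cache (without a barrier; a single dsb is issued later, after
 * the HW descriptor is written) and converts the buffer VA directly to a
 * physical address. The unmap counterpart is accordingly a no-op.
 */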
static inline
qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
				 struct dp_tx_desc_s *tx_desc,
				 qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
					(void *)(nbuf->data + 256));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

static inline
void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
			 struct dp_tx_desc_s *desc)
{
}

#ifdef QCA_DP_TX_NBUF_LIST_FREE
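/**
 * dp_tx_fast_send_be() - Fast TX path for BE targets
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the transmitting vdev
 * @nbuf: network buffer to transmit
 *
 * Builds the HAL_TX_DESC_LEN_BYTES-sized TCL data command in a cached
 * buffer (buffer address and SW cookie, bank id, HTT metadata, length with
 * L3/L4 checksum offload enabled, pmac/vdev ids and, for STA mode, the BSS
 * AST index/hash), then copies it into the next TCL ring entry and issues
 * a single dsb.
 *
 * Return: NULL on success, or the nbuf for the caller to free on failure
 */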
qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	struct dp_tx_desc_s *tx_desc;
	uint16_t desc_pool_id;
	uint16_t pkt_len;
	qdf_dma_addr_t paddr;
	QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
	hal_ring_handle_t hal_ring_hdl = NULL;
	uint32_t *hal_tx_desc_cached;
	void *hal_tx_desc;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;

	pkt_len = qdf_nbuf_headlen(nbuf);
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
	DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
	DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);

	pdev = vdev->pdev;
	if (dp_tx_limit_check(vdev, nbuf))
		return nbuf;

	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return nbuf;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->length = pkt_len;
	tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;

	if (soc->hw_txrx_stats_en)
		tx_desc->flags |= DP_TX_DESC_FLAG_FASTPATH_SIMPLE;

	tx_desc->nbuf->fast_recycled = 1;
	if (nbuf->is_from_recycler && nbuf->fast_xmit)
		tx_desc->flags |= DP_TX_DESC_FLAG_FAST;

	paddr = dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
	if (!paddr) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		goto release_desc;
	}

	tx_desc->dma_addr = paddr;

	hal_tx_desc_cached = (void *)cached_desc;
	hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
	hal_tx_desc_cached[1] = tx_desc->id <<
		TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;

	/* bank_id */
	hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
	hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
		TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;

	hal_tx_desc_cached[4] = tx_desc->length;
	/* l3 and l4 checksum enable */
	hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
		TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;

	hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
	hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;

	if (vdev->opmode == wlan_op_mode_sta)
		hal_tx_desc_cached[6] = vdev->bss_ast_idx |
			((vdev->bss_ast_hash & 0xF) <<
			 TCL_DATA_CMD_CACHE_SET_NUM_LSB);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail2;
	}

	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

	/* Sync cached descriptor with HW */
	qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
	qdf_dsb();

	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
	DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
	status = QDF_STATUS_SUCCESS;

ring_access_fail:
	dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);

ring_access_fail2:
	if (status != QDF_STATUS_SUCCESS) {
		dp_tx_nbuf_unmap_be(soc, tx_desc);
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(soc, tx_desc, desc_pool_id);

	return nbuf;
}
#endif

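/*
 * The BE-specific pool alloc/free hooks below are no-ops: pool memory is
 * expected to be handled by the common TX descriptor path, with only the
 * cookie-conversion init/deinit above being target specific.
 */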
QDF_STATUS dp_tx_desc_pool_alloc_be(struct dp_soc *soc, uint32_t num_elem,
				    uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}

void dp_tx_desc_pool_free_be(struct dp_soc *soc, uint8_t pool_id)
{
}