dp_tx.c
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4
};

#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached
 *                       soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
	    soc->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
	    pdev->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
}

#else /* QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
}
#endif /* QCA_TX_LIMIT_CHECK */
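
/*
 * Illustrative sketch (not part of the driver): how the limit check and
 * the outstanding counters above pair up across the send and completion
 * paths. dp_tx_send_sketch() is a hypothetical caller; the real entry
 * points are the dp_tx_prepare_*() functions later in this file.
 */
#if 0
static QDF_STATUS dp_tx_send_sketch(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Admit the frame only while soc/pdev counts are below their caps */
	if (dp_tx_limit_check(vdev))
		return QDF_STATUS_E_RESOURCES; /* drop; stats already counted */

	/* Paired with dp_tx_outstanding_dec() in dp_tx_desc_release() */
	dp_tx_outstanding_inc(pdev);

	/* ... allocate a descriptor and enqueue to the TCL ring ... */
	return QDF_STATUS_SUCCESS;
}
#endif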
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;

		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO num desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif
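
/*
 * Worked example (sketch): a 3-segment TSO nbuf. tso_cmn_num_seg starts
 * at 3; dp_tx_tso_unmap_segment() passes is_last_seg only on the third
 * call, and dp_tx_tso_desc_release() returns the shared num-seg element
 * to the freelist only once the counter has reached 0:
 *
 *   call #1: count 3 -> 2, is_last_seg = false
 *   call #2: count 2 -> 1, is_last_seg = false
 *   call #3: count 1 -> 0, is_last_seg = true, num-seg element freed
 */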
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&pdev->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "Tx Completion Release desc %d status %d outstanding %d",
		  tx_desc->id, comp_status,
		  qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0])) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does, so no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}

		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
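
/*
 * Worked example (sketch): the 8-byte round-up used above. The values
 * are illustrative, not the actual struct size: for sz = 29,
 * (29 + 7) & ~0x7 = 32; a size already on an 8-byte boundary, e.g. 24,
 * is left unchanged.
 */
#if 0
	uint8_t sz = 29;			/* hypothetical desc size */
	uint8_t aligned = (sz + 7) & ~0x7;	/* 36 & ~0x7 = 32 */
#endif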
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;
	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);
	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              and do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
		     % CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
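
/*
 * Sketch (illustrative): the stats index above is a wrap-around slot id.
 * qdf_atomic_inc_return() yields 1, 2, ..., so successive calls return
 * 1, 2, ..., CDP_MAX_TSO_PACKETS - 1, 0, 1, ... and per-packet TSO stats
 * live in a fixed-size ring of CDP_MAX_TSO_PACKETS entries whose old
 * slots are silently reused.
 */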
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segment descriptors and
 *                                   the tso num segment descriptors
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
		tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso num segment descriptors (the list typically holds
	 * only one)
	 */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
				    qdf_nbuf_t msdu,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);
	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free the allocated TSO seg desc and num seg desc,
		 * and do unmap for the segments if dma map has been done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
				    qdf_nbuf_t msdu,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
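
/*
 * Sketch (illustrative): the allocation loop above builds the segment
 * list by pushing each new element onto the head, so for a 3-segment
 * MSDU the list ends up seg3 -> seg2 -> seg1 and curr_seg starts at the
 * head. On any allocation failure the partially built list is unwound
 * via dp_tx_free_remaining_tso_desc(..., false); no DMA unmap is needed
 * at that point, since qdf_nbuf_get_tso_info() has not mapped anything
 * yet.
 */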
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&vdev->pdev->num_tx_exception);
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}
		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: msdu info (holds the HTT metadata sent to the fw)
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	qdf_ether_header_t *eh;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to an
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be a multiple of 8 bytes,
	 * to get an 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |        HTT Metadata         |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |        Alignment Pad        |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |          SKB Data           |        (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
			is_exception = 1;
		}
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&pdev->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
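
/*
 * Worked example (sketch, hypothetical addresses): suppose
 * qdf_nbuf_data(nbuf) ends in 0x...1005. Then align_pad = 0x1005 & 0x7
 * = 5, the head is pushed down by 5 bytes to the 8-byte aligned
 * 0x...1000, the 8-byte-multiple HTT metadata (say 32 bytes) is pushed
 * in front of that, and pkt_offset = 5 + 32 = 37 tells HW where the
 * real SKB data starts relative to the aligned buffer pointer it is
 * given.
 */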
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for a
 *                        multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev = vdev;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s Tx Extension Descriptor Alloc Fail",
			  __func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&pdev->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		struct dp_tx_seg_info_s *seg_info,
		struct dp_tx_msdu_info_s *msdu_info)
{
	qdf_nbuf_t curr_nbuf = NULL;
	uint16_t total_len = 0;
	qdf_dma_addr_t paddr;
	int32_t i;
	int32_t mapped_buf_num = 0;
	struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
	qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

	DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

	/* Continue only if frames are of DATA type */
	if (!DP_FRAME_IS_DATA(qos_wh)) {
		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Pkt. recd is of not data type");
		goto error;
	}

	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
	if (vdev->raw_mode_war &&
	    (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
	    (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
		qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

	for (curr_nbuf = nbuf, i = 0; curr_nbuf;
	     curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
		if (QDF_STATUS_SUCCESS !=
		    qdf_nbuf_map_nbytes_single(vdev->osdev,
					       curr_nbuf,
					       QDF_DMA_TO_DEVICE,
					       curr_nbuf->len)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s dma map error ", __func__);
			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
			mapped_buf_num = i;
			goto error;
		}

		paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
		seg_info->frags[i].paddr_lo = paddr;
		seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
		seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
		seg_info->frags[i].vaddr = (void *)curr_nbuf;

		total_len += qdf_nbuf_len(curr_nbuf);
	}

	seg_info->frag_cnt = i;
	seg_info->total_len = total_len;
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_raw;
	msdu_info->num_seg = 1;

	return nbuf;

error:
	i = 0;
	while (nbuf) {
		curr_nbuf = nbuf;
		if (i < mapped_buf_num) {
			qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
						     QDF_DMA_TO_DEVICE,
						     curr_nbuf->len);
			i++;
		}
		nbuf = qdf_nbuf_next(nbuf);
		qdf_nbuf_free(curr_nbuf);
	}
	return NULL;
}
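
/*
 * Sketch (illustrative): the lo/hi split used when filling
 * seg_info->frags[] above. DMA addresses may be wider than 32 bits, so
 * each fragment address is stored as two 32-bit words for the MSDU
 * extension descriptor; e.g. a hypothetical paddr of 0x1_2345_6789
 * gives paddr_lo = 0x23456789 and paddr_hi = 0x1.
 */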
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
				    qdf_nbuf_t nbuf)
{
	qdf_nbuf_t cur_nbuf = nbuf;

	do {
		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
					     QDF_DMA_TO_DEVICE,
					     cur_nbuf->len);
		cur_nbuf = qdf_nbuf_next(cur_nbuf);
	} while (cur_nbuf);
}

#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
				 htt_cmn_pkt_type_raw)) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
			break; \
		else if (qdf_nbuf_is_tso((nbuf_local))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       NULL, 1, 0); \
	} while (0); \
}
#else
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
#endif
  967. /**
  968. * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
  969. * @soc: DP Soc Handle
  970. * @vdev: DP vdev handle
  971. * @tx_desc: Tx Descriptor Handle
  972. * @tid: TID from HLOS for overriding default DSCP-TID mapping
  973. * @fw_metadata: Metadata to send to Target Firmware along with frame
  974. * @ring_id: Ring ID of H/W ring to which we enqueue the packet
  975. * @tx_exc_metadata: Handle that holds exception path meta data
  976. *
  977. * Gets the next free TCL HW DMA descriptor and sets up required parameters
  978. * from software Tx descriptor
  979. *
  980. * Return: QDF_STATUS_SUCCESS: success
  981. * QDF_STATUS_E_RESOURCES: Error return
  982. */
  983. static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
  984. struct dp_tx_desc_s *tx_desc, uint8_t tid,
  985. uint16_t fw_metadata, uint8_t ring_id,
  986. struct cdp_tx_exception_metadata
  987. *tx_exc_metadata)
  988. {
  989. uint8_t type;
  990. uint16_t length;
  991. void *hal_tx_desc;
  992. uint32_t *hal_tx_desc_cached;
  993. qdf_dma_addr_t dma_addr;
  994. /*
  995. * Setting it initialization statically here to avoid
  996. * a memset call jump with qdf_mem_set call
  997. */
  998. uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
  999. enum cdp_sec_type sec_type = ((tx_exc_metadata &&
  1000. tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
  1001. tx_exc_metadata->sec_type : vdev->sec_type);
  1002. /* Return Buffer Manager ID */
  1003. uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
  1004. hal_ring_handle_t hal_ring_hdl = NULL;
  1005. QDF_STATUS status = QDF_STATUS_E_RESOURCES;
  1006. if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
  1007. dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
  1008. return QDF_STATUS_E_RESOURCES;
  1009. }
  1010. hal_tx_desc_cached = (void *) cached_desc;
  1011. if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
  1012. length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1013. type = HAL_TX_BUF_TYPE_EXT_DESC;
  1014. dma_addr = tx_desc->msdu_ext_desc->paddr;
  1015. } else {
  1016. length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	qdf_assert_always(dma_addr);

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 dma_addr, bm_id, tx_desc->id,
				 type);
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));

	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
	     || qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (qdf_unlikely(vdev->pdev->delay_stats_flag))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());

	dp_verbose_debug("length:%d, type = %d, dma_addr %llx, offset %d desc id %u",
			 length, type, (uint64_t)dma_addr,
			 tx_desc->pkt_offset, tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);

	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

	status = QDF_STATUS_SUCCESS;

ring_access_fail:
	if (hif_pm_runtime_get(soc->hif_handle,
			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
		hif_pm_runtime_put(soc->hif_handle,
				   RTPM_ID_DW_TX_HW_ENQUEUE);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
	}

	return status;
}
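
/*
 * Note on the ring_access_fail tail above: hal_srng_access_end() flushes the
 * new head pointer to HW, which requires the bus to be awake; when the
 * runtime-PM get fails, hal_srng_access_end_reap() only reaps and defers the
 * HW head-pointer write to a later access. A minimal sketch of the same guard
 * pattern (illustrative only; "ring" and "rtpm_id" stand in for the actual
 * handles used above):
 *
 *	if (hif_pm_runtime_get(soc->hif_handle, rtpm_id) == 0) {
 *		hal_srng_access_end(soc->hal_soc, ring);
 *		hif_pm_runtime_put(soc->hif_handle, rtpm_id);
 *	} else {
 *		hal_srng_access_end_reap(soc->hal_soc, ring);
 *	}
 */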
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
				       sizeof(qdf_ether_header_t));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
					 && (ether_type ==
					     qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}

	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2 * QDF_MAC_ADDR_SIZE +
					   sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_VLAN)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;

	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
			 || (qdf_nbuf_is_ipv4_pkt(nbuf)
			     && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			 || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone))
			qdf_nbuf_free(nbuf_clone);
		return true;
	}

	if (qdf_unlikely(nbuf_clone))
		qdf_nbuf_free(nbuf_clone);

	return false;
}
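
/*
 * Illustrative usage sketch for dp_cce_classify() (mirrors how the callers
 * below consume the result; tx_desc and tid are the caller's locals): when
 * the HW Common Classification Engine is disabled, a positive software
 * classification routes the frame through FW on the VO queue:
 *
 *	if (qdf_unlikely(soc->cce_disable) && dp_cce_classify(vdev, nbuf)) {
 *		tid = DP_VO_TID;
 *		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
 *	}
 */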
/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the extracted TID is stored
 *
 * Extract the DSCP or PCP information from frame and map into TID value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = eh->ether_dhost;
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *)nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;

	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE +
					 sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				  sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2 * QDF_MAC_ADDR_SIZE
						 + sizeof(*llcHdr) +
						 sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				  sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *)eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *)L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flow label.
		 * IGMP/MLD cases to be handled in phase 2.
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *)L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

	return;
}
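
/*
 * Worked example for the DSCP mapping above (illustrative numbers): an IPv4
 * packet with TOS byte 0xB8 (DSCP 46, Expedited Forwarding) yields
 *
 *	tos = (0xB8 >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;  // 46 with shift 2, mask 0x3F
 *	msdu_info->tid = pdev->dscp_tid_map[map_id][46];     // typically a voice TID
 *
 * The final TID depends entirely on the configured dscp_tid_map table for
 * this vdev's map_id.
 */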
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the classified TID is stored
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
		return;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc->vdev) {
		if (tx_desc->vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			tx_desc->vdev->is_tdls_frame = false;
		}
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @tx_desc: TX descriptor
 * @vdev: datapath vdev handle
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
					 struct dp_vdev *vdev)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	if (qdf_unlikely(!vdev)) {
		dp_err("vdev is null!");
		return;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		return;
	}
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
						struct dp_vdev *vdev)
{
}
#endif
/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address of the frame
 * @dstmac: destination MAC address of the frame
 *
 * Return: 1 if frame needs to be dropped else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);
	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);

	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer->peer_ids[0] ==
		    src_ast_entry->peer->peer_ids[0])
			return 1;
	}

	return 0;
}
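
/*
 * Illustrative caller sketch: with an 802.3 header "eh", the loopback check
 * above would typically be applied before enqueue, e.g.
 *
 *	if (dp_tx_frame_is_drop(vdev, eh->ether_shost, eh->ether_dhost))
 *		return nbuf;	// frame would go straight back to its sender
 *
 * i.e. when both MACs resolve via the AST hash to the same peer, the frame
 * is a loopback and is dropped early.
 */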
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (TID, Tx queue, FW metadata)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(tx_desc);

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					     HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
						peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw) {
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
	}

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
			 qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
						    QDF_DMA_TO_DEVICE,
						    nbuf->len))) {
		/* Handle failure */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
				  htt_tcl_metadata, tx_q->ring_id,
				  tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
			  __func__, tx_desc, tx_q->ring_id);
		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
					     QDF_DMA_TO_DEVICE,
					     nbuf->len);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
				i++;
				continue;
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
					  htt_tcl_metadata, tx_q->ring_id,
					  NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
				  __func__, tx_desc, tx_q->ring_id);

			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			if (msdu_info->frm_type == dp_tx_frm_me) {
				i++;
				continue;
			}
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf.
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}
/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *                     for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when the SG info has been prepared successfully
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS !=
		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE, nbuf->len)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_mapped_paddr_get(nbuf);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t)paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *)nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t)paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
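
/*
 * Illustrative layout (assuming a nonlinear nbuf with two page frags): after
 * dp_tx_prepare_sg() the segment descriptor holds three DMA pieces:
 *
 *	seg_info->frags[0]  linear head, len = qdf_nbuf_headlen(nbuf)
 *	seg_info->frags[1]  page frag 0, len = qdf_nbuf_get_frag_size(nbuf, 0)
 *	seg_info->frags[2]  page frag 1, len = qdf_nbuf_get_frag_size(nbuf, 1)
 *
 * with seg_info->frag_cnt = 3 and seg_info->total_len = qdf_nbuf_len(nbuf),
 * all carried as a single segment (num_seg = 1) of frame type dp_tx_frm_sg.
 */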
/**
 * dp_tx_add_tx_sniffer_meta_data() - Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
 *                                  and prepare msdu_info for mesh frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
		  " tid %d to_fw %d",
		  __func__, msdu_info->meta_data[0],
		  msdu_info->meta_data[1],
		  msdu_info->meta_data[2],
		  msdu_info->meta_data[3],
		  msdu_info->meta_data[4],
		  msdu_info->meta_data[5],
		  msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif
/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}
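
/*
 * Illustrative sketch of preparing exception-path metadata (an in-file
 * caller is assumed; the field and constant names are exactly the ones
 * validated above):
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.peer_id = HTT_INVALID_PEER;
 *	meta.tid = HTT_INVALID_TID;			// no TID override
 *	meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;	// use vdev setting
 *	meta.sec_type = CDP_INVALID_SEC_TYPE;		// use vdev setting
 *
 *	if (dp_check_exc_metadata(&meta))
 *		nbuf = dp_tx_send_exception(soc, vdev_id, nbuf, &meta);
 */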
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (qdf_unlikely(!vdev))
		goto fail;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb %pM", nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");
		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}

	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
		}
	}

	if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));
		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * Single linear frame: if nbuf is a simple linear frame, use
	 * send_single function to prepare a direct-buffer type TCL descriptor
	 * and enqueue it to the TCL SRNG. There is no need to set up an MSDU
	 * extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
				      tx_exc_metadata->peer_id,
				      tx_exc_metadata);

	return nbuf;

fail:
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @soc: DP soc handle
 * @vdev_id: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (!nbuf_mesh) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_unshare failed");
		return nbuf;
	}

	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						  vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "vdev is NULL for vdev_id %d", vdev_id);
		return nbuf;
	}

	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
	    (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
	    !no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_clone failed");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(soc, vdev_id, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(soc, vdev_id, nbuf);
	if ((!nbuf) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}
#else
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	return dp_tx_send(soc, vdev_id, nbuf);
}
#endif
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
 * cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	uint16_t peer_id = HTT_INVALID_PEER;
	/*
	 * A qdf_mem_zero() here would cost an extra function call per packet,
	 * so rely on static (zero-initializer) stack clearing instead.
	 */
	struct dp_tx_msdu_info_s msdu_info = {0};
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (qdf_unlikely(!vdev))
		return nbuf;

	dp_verbose_debug("skb %pM", nbuf->data);

	/*
	 * Set default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								    &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					 qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			dp_verbose_debug("Mcast frm for ME %pK", vdev);

			DP_STATS_INC_PKT(vdev,
					 tx_i.mcast_en.mcast_pkt, 1,
					 qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
			    QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;
	}

	/*
	 * Single linear frame: if nbuf is a simple linear frame, use
	 * send_single function to prepare a direct-buffer type TCL descriptor
	 * and enqueue it to the TCL SRNG. There is no need to set up an MSDU
	 * extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}
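
/*
 * Illustrative sketch of the hard_start_xmit hookup (assumption: an OSIF/HDD
 * caller owning the cdp_soc_t handle; only dp_tx_send() itself is from this
 * file):
 *
 *	qdf_nbuf_t unsent = dp_tx_send(cdp_soc, vdev_id, nbuf);
 *
 *	if (unsent)
 *		qdf_nbuf_free(unsent);	// dp_tx_send() returns any nbuf it
 *					// could not hand to the hardware
 */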
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	soc = vdev->pdev->soc;

	qdf_assert(vdev);

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc,
				 (uint8_t *)(eh->ether_shost),
				 vdev->pdev->pdev_id);

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			    ((peer->bss_peer && num_peers_3addr && is_mcast) ||
			     (peer->wds_enabled &&
			      ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			       (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			    ((peer->bss_peer &&
			      !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
			     peer->nawds_enabled)) {
#endif
				peer_id = DP_INVALID_PEER;

				if (peer->nawds_enabled) {
					peer_id = peer->peer_ids[0];
					if (sa_peer == peer) {
						QDF_TRACE(
							QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							" %s: multicast packet",
							__func__);
						DP_STATS_INC(peer,
							     tx.nawds_mcast_drop,
							     1);
						continue;
					}
				}

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				} else {
					if (peer_id != DP_INVALID_PEER)
						DP_STATS_INC_PKT(peer,
								 tx.nawds_mcast,
								 1,
								 qdf_nbuf_len(nbuf));
				}
			}
		}
	}

	if (vdev->nawds_enabled) {
		peer_id = DP_INVALID_PEER;

		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));

		nbuf = dp_tx_send_msdu_single(vdev,
					      nbuf,
					      &msdu_info,
					      peer_id, NULL);

		if (nbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("pkt send failed"));
			qdf_nbuf_free(nbuf);
		}
	} else
		qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: time latency stamped into the tx capture header
 *
 * This function indicates whether the buffer needs to be sent to the
 * stack for freeing or not.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
		    (pdev->m_copy_id.tx_peer_id == peer_id)) {
			return QDF_STATUS_E_INVAL;
		}

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;
	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = time_latency;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
#endif
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(desc, vdev);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO segment before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
						desc->tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	/* check vdev before dereferencing it for the unmap */
	if (qdf_unlikely(!vdev)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
				     QDF_DMA_TO_DEVICE, nbuf->len);

	if (qdf_likely(!vdev->mesh_vdev))
		qdf_nbuf_free(nbuf);
	else {
		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
		} else
			vdev->osif_tx_free_ext((nbuf));
	}
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}
	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "netbuf %pK offset %lu", netbuf,
			  sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->band = tx_desc->pdev->operating_channel.band;
	mhdr->channel = tx_desc->pdev->operating_channel.num;
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
}
#endif
/**
 * dp_tx_compute_delay() - Compute and fill in all timestamps
 *                         to pass in correct fields
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
static void dp_tx_compute_delay(struct dp_vdev *vdev,
				struct dp_tx_desc_s *tx_desc,
				uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;

	if (qdf_likely(!vdev->pdev->delay_stats_flag))
		return;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
			      CDP_DELAY_STATS_SW_ENQ, ring_id);
	/*
	 * Delay between packet enqueued to HW and Tx completion
	 */
	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);

	/*
	 * Update interframe delay stats calculated at hardstart receive point.
	 * Value of vdev->prev_tx_enq_tstamp will be 0 for the first frame, so
	 * the interframe delay will not be calculated correctly for the first
	 * frame. On the other hand, this avoids an extra per-packet check of
	 * !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}
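
/*
 * Worked example (illustrative timestamps, in ms): ingress at t=1000,
 * HW enqueue at t=1002, completion processed at t=1007 gives
 *
 *	sw_enqueue_delay    = 1002 - 1000 = 2	-> CDP_DELAY_STATS_SW_ENQ
 *	fwhw_transmit_delay = 1007 - 1002 = 5	-> CDP_DELAY_STATS_FW_HW_TRANSMIT
 *
 * while interframe_delay measures ingress-to-ingress spacing between two
 * consecutive frames on the same vdev.
 */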
#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
}
#else
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
	if (subtype != QDF_PROTO_INVALID)
		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
}
#endif
/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *                             per wbm ring
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	uint8_t mcs, pkt_type;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	soc = pdev->soc;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err("Release source is not from TQM");
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag))
		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON3));

	/*
	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
	 * are no completions for failed cases. Hence updating tx_failed from
	 * data path. Please note that if tx_failed is fixed to be from ppdu,
	 * then this has to be removed
	 */
	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
				   peer->stats.tx.dropped.fw_rem_notx +
				   peer->stats.tx.dropped.fw_rem_tx +
				   peer->stats.tx.dropped.age_out +
				   peer->stats.tx.dropped.fw_reason1 +
				   peer->stats.tx.dropped.fw_reason2 +
				   peer->stats.tx.dropped.fw_reason3;

	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
		tid_stats->tqm_status_cnt[ts->status]++;
	}

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
		dp_update_no_ack_stats(tx_desc->nbuf, peer);
		return;
	}

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

	/*
	 * Following Rate Statistics are updated from HTT PPDU events from FW.
	 * Return from here if HTT PPDU events are enabled.
	 */
	if (!(soc->process_tx_status))
		return;

	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ts->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
                          struct dp_tx_desc_s *tx_desc)
{
        struct dp_tx_desc_pool_s *pool;
        uint8_t desc_pool_id;

        desc_pool_id = tx_desc->pool_id;
        pool = &soc->tx_desc[desc_pool_id];

        qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
                            struct dp_tx_desc_s *tx_desc)
{
        struct dp_tx_desc_pool_s *pool;
        uint8_t desc_pool_id;

        desc_pool_id = tx_desc->pool_id;
        pool = &soc->tx_desc[desc_pool_id];

        qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @tx_desc: tx desc
 * @netbuf: buffer
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
                                           struct dp_tx_desc_s *tx_desc,
                                           qdf_nbuf_t netbuf)
{
        void *osif_dev;
        ol_txrx_completion_fp tx_compl_cbk = NULL;

        qdf_assert(tx_desc);
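
        /*
         * Read the vdev fields under the flow pool lock:
         * dp_tx_desc_flush() clears tx_desc->vdev under this same lock
         * during vdev detach, so taking it here avoids racing with the
         * OS shim handle going away underneath us.
         */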
        dp_tx_flow_pool_lock(soc, tx_desc);
        if (!tx_desc->vdev ||
            !tx_desc->vdev->osif_vdev) {
                dp_tx_flow_pool_unlock(soc, tx_desc);
                return;
        }

        osif_dev = tx_desc->vdev->osif_vdev;
        tx_compl_cbk = tx_desc->vdev->tx_comp;
        dp_tx_flow_pool_unlock(soc, tx_desc);

        if (tx_compl_cbk)
                tx_compl_cbk(netbuf, osif_dev);
}
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
                                               struct dp_peer *peer,
                                               uint8_t tid,
                                               uint64_t txdesc_ts,
                                               uint32_t ppdu_id)
{
        uint64_t delta_ms;
        struct cdp_tx_sojourn_stats *sojourn_stats;

        if (qdf_unlikely(pdev->enhanced_stats_en == 0))
                return;

        if (qdf_unlikely(tid == HTT_INVALID_TID ||
                         tid >= CDP_DATA_TID_MAX))
                return;

        if (qdf_unlikely(!pdev->sojourn_buf))
                return;

        sojourn_stats = (struct cdp_tx_sojourn_stats *)
                qdf_nbuf_data(pdev->sojourn_buf);

        sojourn_stats->cookie = (void *)peer->wlanstats_ctx;

        delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
                   txdesc_ts;
        qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
                            delta_ms);
        sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
        sojourn_stats->num_msdus[tid] = 1;
        sojourn_stats->avg_sojourn_msdu[tid].internal =
                peer->avg_sojourn_msdu[tid].internal;
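
        /*
         * pdev->sojourn_buf is a single shared nbuf: publish the
         * snapshot via WDI, then zero the per-TID fields so the next
         * completion on this TID starts from a clean buffer.
         */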
        dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
                             pdev->sojourn_buf, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
        sojourn_stats->sum_sojourn_msdu[tid] = 0;
        sojourn_stats->num_msdus[tid] = 0;
        sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
                                               struct dp_peer *peer,
                                               uint8_t tid,
                                               uint64_t txdesc_ts,
                                               uint32_t ppdu_id)
{
}
#endif
/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
 *
 * Return: none
 */
static inline void
dp_tx_comp_process_desc(struct dp_soc *soc,
                        struct dp_tx_desc_s *desc,
                        struct hal_tx_completion_status *ts,
                        struct dp_peer *peer)
{
        uint64_t time_latency = 0;

        if (qdf_unlikely(desc->pdev->latency_capture_enable)) {
                time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
                                desc->timestamp);
        }

        /*
         * m_copy/tx_capture modes are not supported for
         * scatter-gather packets
         */
        if (!(desc->msdu_ext_desc)) {
                if (QDF_STATUS_SUCCESS ==
                    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
                        return;
                }

                if (QDF_STATUS_SUCCESS ==
                    dp_get_completion_indication_for_stack(soc,
                                                           desc->pdev,
                                                           peer, ts,
                                                           desc->nbuf,
                                                           time_latency)) {
                        qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
                                                     QDF_DMA_TO_DEVICE,
                                                     desc->nbuf->len);
                        dp_send_completion_to_stack(soc,
                                                    desc->pdev,
                                                    ts->peer_id,
                                                    ts->ppdu_id,
                                                    desc->nbuf);
                        return;
                }
        }
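
        /*
         * Neither the completion-queue path nor the stack-indication
         * path consumed the nbuf; unmap and free it here.
         */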
        dp_tx_comp_free_buf(soc, desc);
}
/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
static inline
void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
                                  struct hal_tx_completion_status *ts,
                                  struct dp_peer *peer, uint8_t ring_id)
{
        uint32_t length;
        qdf_ether_header_t *eh;
        struct dp_soc *soc = NULL;
        struct dp_vdev *vdev = tx_desc->vdev;
        qdf_nbuf_t nbuf = tx_desc->nbuf;

        if (!vdev || !nbuf) {
                dp_info_rl("invalid tx descriptor. vdev or nbuf NULL");
                goto out;
        }

        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

        DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
                                 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
                                 QDF_TRACE_DEFAULT_PDEV_ID,
                                 qdf_nbuf_data_addr(nbuf),
                                 sizeof(qdf_nbuf_data(nbuf)),
                                 tx_desc->id,
                                 ts->status));

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "--------------------\n"
                  "Tx Completion Stats:\n"
                  "--------------------\n"
                  "ack_frame_rssi = %d\n"
                  "first_msdu = %d\n"
                  "last_msdu = %d\n"
                  "msdu_part_of_amsdu = %d\n"
                  "rate_stats valid = %d\n"
                  "bw = %d\n"
                  "pkt_type = %d\n"
                  "stbc = %d\n"
                  "ldpc = %d\n"
                  "sgi = %d\n"
                  "mcs = %d\n"
                  "ofdma = %d\n"
                  "tones_in_ru = %d\n"
                  "tsf = %d\n"
                  "ppdu_id = %d\n"
                  "transmit_cnt = %d\n"
                  "tid = %d\n"
                  "peer_id = %d\n",
                  ts->ack_frame_rssi, ts->first_msdu,
                  ts->last_msdu, ts->msdu_part_of_amsdu,
                  ts->valid, ts->bw, ts->pkt_type, ts->stbc,
                  ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
                  ts->tones_in_ru, ts->tsf, ts->ppdu_id,
                  ts->transmit_cnt, ts->tid, ts->peer_id);

        soc = vdev->pdev->soc;

        /* Update SoC level stats */
        DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
                      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

        /* Update per-packet stats for mesh mode */
        if (qdf_unlikely(vdev->mesh_vdev) &&
            !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
                dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

        length = qdf_nbuf_len(nbuf);
        /* Update peer level stats */
        if (!peer) {
                QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
                                   "peer is null or deletion in progress");
                DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
                goto out;
        }
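
        /*
         * Completions against the bss peer of an AP vdev correspond to
         * multicast traffic; an ethernet-encap broadcast DA is
         * additionally counted under tx.bcast. Everything else is
         * accounted as unicast.
         */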
        if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
                if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
                        DP_STATS_INC_PKT(peer, tx.mcast, 1, length);

                        if ((peer->vdev->tx_encap_type ==
                             htt_cmn_pkt_type_ethernet) &&
                            QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
                                DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
                        }
                }
        } else {
                DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
                if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
                        DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
        }

        dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);

#ifdef QCA_SUPPORT_RDK_STATS
        if (soc->wlanstats_enabled)
                dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
                                            tx_desc->timestamp,
                                            ts->ppdu_id);
#endif

out:
        return;
}
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function processes a batch of descriptors reaped by
 * dp_tx_comp_handler and releases the software descriptors once
 * processing is complete.
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
                             struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
        struct dp_tx_desc_s *desc;
        struct dp_tx_desc_s *next;
        struct hal_tx_completion_status ts = {0};
        struct dp_peer *peer;
        qdf_nbuf_t netbuf;

        desc = comp_head;
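
        /*
         * Each completion carries its own peer_id, so a peer reference
         * is taken per descriptor and dropped once that descriptor has
         * been fully processed.
         */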
        while (desc) {
                hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
                peer = dp_peer_find_by_id(soc, ts.peer_id);
                dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);

                netbuf = desc->nbuf;
                /* check tx complete notification */
                if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
                        dp_tx_notify_completion(soc, desc, netbuf);

                dp_tx_comp_process_desc(soc, desc, &ts, peer);

                if (peer)
                        dp_peer_unref_del_find_by_id(peer);

                next = desc->next;

                dp_tx_desc_release(desc, desc->pool_id);
                desc = next;
        }
}
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
                                  uint8_t ring_id)
{
        uint8_t tx_status;
        struct dp_pdev *pdev;
        struct dp_vdev *vdev;
        struct dp_soc *soc;
        struct hal_tx_completion_status ts = {0};
        uint32_t *htt_desc = (uint32_t *)status;
        struct dp_peer *peer;
        struct cdp_tid_tx_stats *tid_stats = NULL;
        struct htt_soc *htt_handle;

        qdf_assert(tx_desc->pdev);

        pdev = tx_desc->pdev;
        vdev = tx_desc->vdev;
        soc = pdev->soc;

        if (!vdev)
                return;

        tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
        htt_handle = (struct htt_soc *)soc->htt_handle;
        htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

        switch (tx_status) {
        case HTT_TX_FW2WBM_TX_STATUS_OK:
        case HTT_TX_FW2WBM_TX_STATUS_DROP:
        case HTT_TX_FW2WBM_TX_STATUS_TTL:
        {
                uint8_t tid;

                if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
                        ts.peer_id =
                                HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
                                                htt_desc[2]);
                        ts.tid =
                                HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
                                                htt_desc[2]);
                } else {
                        ts.peer_id = HTT_INVALID_PEER;
                        ts.tid = HTT_INVALID_TID;
                }
                ts.ppdu_id =
                        HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
                                        htt_desc[1]);
                ts.ack_frame_rssi =
                        HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
                                        htt_desc[1]);
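
                /*
                 * FW-routed completions appear to be reported per MSDU,
                 * so each one is marked as both first and last msdu.
                 */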
                ts.first_msdu = 1;
                ts.last_msdu = 1;

                tid = ts.tid;
                if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
                        tid = CDP_MAX_DATA_TIDS - 1;

                tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

                if (qdf_unlikely(pdev->delay_stats_flag))
                        dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
                if (tx_status < CDP_MAX_TX_HTT_STATUS)
                        tid_stats->htt_status_cnt[tx_status]++;

                peer = dp_peer_find_by_id(soc, ts.peer_id);

                dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id);
                dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
                dp_tx_desc_release(tx_desc, tx_desc->pool_id);

                /* Drop the peer reference only after the descriptor has
                 * been fully processed; releasing it before the calls
                 * above would leave 'peer' dangling.
                 */
                if (qdf_likely(peer))
                        dp_peer_unref_del_find_by_id(peer);

                break;
        }
        case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
        {
                dp_tx_reinject_handler(tx_desc, status);
                break;
        }
        case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
        {
                dp_tx_inspect_handler(tx_desc, status);
                break;
        }
        case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
        {
                dp_tx_mec_handler(vdev, status);
                break;
        }
        default:
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s Invalid HTT tx_status %d\n",
                          __func__, tx_status);
                break;
        }
}
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
        bool limit_hit = false;
        struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

        limit_hit = (num_reaped >= cfg->tx_comp_loop_pkt_limit);

        if (limit_hit)
                DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

        return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
        return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
        return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
        return false;
}
#endif
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
                            hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
                            uint32_t quota)
{
        void *tx_comp_hal_desc;
        uint8_t buffer_src;
        uint8_t pool_id;
        uint32_t tx_desc_id;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_s *head_desc = NULL;
        struct dp_tx_desc_s *tail_desc = NULL;
        uint32_t num_processed = 0;
        uint32_t count = 0;
        bool force_break = false;

        DP_HIST_INIT();

more_data:
        /* Re-initialize local variables to be re-used */
        head_desc = NULL;
        tail_desc = NULL;

        if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
                dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
                return 0;
        }
        /* Find head descriptor from completion ring */
        while (qdf_likely(tx_comp_hal_desc =
                        hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl))) {

                buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

                /* If this buffer was not released by TQM or FW, then it
                 * is not a Tx completion indication; assert.
                 */
                if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
                    (buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
                        uint8_t wbm_internal_error;

                        dp_err_rl(
                                "Tx comp release_src != TQM | FW but from %d",
                                buffer_src);
                        hal_dump_comp_desc(tx_comp_hal_desc);
                        DP_STATS_INC(soc, tx.invalid_release_source, 1);

                        /* When WBM sees NULL buffer_addr_info in any of
                         * the ingress rings it sends an error indication,
                         * with wbm_internal_error=1, to a specific ring.
                         * The WBM2SW ring used to indicate these errors is
                         * fixed in HW, and that ring is being used as the
                         * Tx completion ring. These errors are not related
                         * to Tx completions and should just be ignored.
                         */
                        wbm_internal_error = hal_get_wbm_internal_error(
                                                        soc->hal_soc,
                                                        tx_comp_hal_desc);

                        if (wbm_internal_error) {
                                dp_err_rl("Tx comp wbm_internal_error!!");
                                DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

                                if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
                                    buffer_src)
                                        dp_handle_wbm_internal_error(
                                                soc,
                                                tx_comp_hal_desc,
                                                hal_tx_comp_get_buffer_type(
                                                        tx_comp_hal_desc));
                        } else {
                                dp_err_rl("Tx comp wbm_internal_error false");
                                DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
                        }
                        continue;
                }
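
                /*
                 * The completion carries a software cookie; assuming the
                 * default DP_TX_DESC_ID_* definitions, it packs
                 * (pool | page | offset), composed roughly as:
                 *
                 *   id = (pool_id << DP_TX_DESC_ID_POOL_OS) |
                 *        (page_id << DP_TX_DESC_ID_PAGE_OS) |
                 *        (offset  << DP_TX_DESC_ID_OFFSET_OS);
                 *
                 * and is unpacked below with the matching masks/shifts.
                 */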
                /* Get descriptor id */
                tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
                pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
                        DP_TX_DESC_ID_POOL_OS;

                /* Find Tx descriptor */
                tx_desc = dp_tx_desc_find(soc, pool_id,
                                (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
                                DP_TX_DESC_ID_PAGE_OS,
                                (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
                                DP_TX_DESC_ID_OFFSET_OS);

                /*
                 * If the descriptor is already freed in vdev_detach,
                 * continue to next descriptor
                 */
                if (!tx_desc->vdev && !tx_desc->flags) {
                        QDF_TRACE(QDF_MODULE_ID_DP,
                                  QDF_TRACE_LEVEL_INFO,
                                  "Descriptor freed in vdev_detach %d",
                                  tx_desc_id);

                        num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
                        count++;
                        continue;
                }

                if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
                        QDF_TRACE(QDF_MODULE_ID_DP,
                                  QDF_TRACE_LEVEL_INFO,
                                  "pdev in down state %d",
                                  tx_desc_id);

                        num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
                        count++;

                        dp_tx_comp_free_buf(soc, tx_desc);
                        dp_tx_desc_release(tx_desc, tx_desc->pool_id);
                        continue;
                }
                /*
                 * If the release source is FW, process the HTT status
                 */
                if (qdf_unlikely(buffer_src ==
                                 HAL_TX_COMP_RELEASE_SOURCE_FW)) {
                        uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

                        hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
                                                 htt_tx_status);
                        dp_tx_process_htt_completion(tx_desc,
                                                     htt_tx_status, ring_id);
                } else {
                        /* Pool id is not matching. Error */
                        if (tx_desc->pool_id != pool_id) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_FATAL,
                                          "Tx Comp pool id %d not matched %d",
                                          pool_id, tx_desc->pool_id);
                                qdf_assert_always(0);
                        }

                        if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
                            !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_FATAL,
                                          "Txdesc invalid, flgs = %x,id = %d",
                                          tx_desc->flags, tx_desc_id);
                                qdf_assert_always(0);
                        }

                        /* First ring descriptor on the cycle */
                        if (!head_desc) {
                                head_desc = tx_desc;
                                tail_desc = tx_desc;
                        }

                        tail_desc->next = tx_desc;
                        tx_desc->next = NULL;
                        tail_desc = tx_desc;

                        DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

                        /* Collect hw completion contents */
                        hal_tx_comp_desc_sync(tx_comp_hal_desc,
                                              &tx_desc->comp, 1);
                }
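
                /*
                 * Charge one quota unit per
                 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) reaped descriptors,
                 * which (as the macro name suggests) stretches the NAPI
                 * budget across the relatively cheap tx-completion work.
                 */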
                num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

                /*
                 * Stop processing once the processed packet count
                 * reaches the given quota
                 */
                if (num_processed >= quota) {
                        force_break = true;
                        break;
                }

                count++;

                if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
                        break;
        }
        dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

        /* Process the reaped descriptors */
        if (head_desc)
                dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

        if (dp_tx_comp_enable_eol_data_check(soc)) {
                if (!force_break &&
                    hal_srng_dst_peek_sync_locked(soc->hal_soc,
                                                  hal_ring_hdl)) {
                        DP_STATS_INC(soc, tx.hp_oos2, 1);
                        if (!hif_exec_should_yield(soc->hif_handle,
                                                   int_ctx->dp_intr_id))
                                goto more_data;
                }
        }
        DP_TX_HIST_STATS_PER_PDEV();

        return num_processed;
}
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                         enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

        if (!vdev) {
                dp_err("vdev handle for id %d is NULL", vdev_id);
                return NULL;
        }

        if (tx_spec & OL_TX_SPEC_NO_FREE)
                vdev->is_tdls_frame = true;

        return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
        int pdev_id;

        /*
         * Fill HTT TCL Metadata with Vdev ID and MAC ID
         */
        HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
                                     HTT_TCL_METADATA_TYPE_VDEV_BASED);

        HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
                                        vdev->vdev_id);

        pdev_id =
                dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
                                                       vdev->pdev->pdev_id);
        HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

        /*
         * Set HTT Extension Valid bit to 0 by default
         */
        HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

        dp_tx_vdev_update_search_flags(vdev);

        return QDF_STATUS_SUCCESS;
}
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
        return false;
}
#endif
/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
        struct dp_soc *soc = vdev->pdev->soc;

        /*
         * Enable both AddrY (SA based search) and AddrX (DA based search)
         * for a TDLS link.
         *
         * Enable AddrY (SA based search) only for non-WDS STA and
         * ProxySTA VAP (in HKv1) modes.
         *
         * In all other VAP modes, only DA based search should be
         * enabled.
         */
        if (vdev->opmode == wlan_op_mode_sta &&
            vdev->tdls_link_connected)
                vdev->hal_desc_addr_search_flags =
                        (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
        else if ((vdev->opmode == wlan_op_mode_sta) &&
                 !dp_tx_da_search_override(vdev))
                vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
        else
                vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

        /* Set the search type only when peer map v2 messaging is enabled,
         * as the search index (AST hash) is available only when v2 is
         * enabled
         */
        if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
                vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
        else
                vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
                          struct dp_vdev *vdev,
                          struct dp_tx_desc_s *tx_desc)
{
        if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
                return false;

        /*
         * If a vdev is given, only check whether the desc's vdev
         * matches; if vdev is NULL, check whether the desc's pdev
         * matches.
         */
        return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 * true: flush the TX descs.
 * false: only reset the Vdev in each allocated TX desc
 * that is associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset the Vdev to NULL in associated TX
 * Descs.
 */
static void dp_tx_desc_flush(struct dp_pdev *pdev,
                             struct dp_vdev *vdev,
                             bool force_free)
{
        uint8_t i;
        uint32_t j;
        uint32_t num_desc, page_id, offset;
        uint16_t num_desc_per_page;
        struct dp_soc *soc = pdev->soc;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

        if (!vdev && !force_free) {
                dp_err("Reset TX desc vdev, Vdev param is required!");
                return;
        }

        for (i = 0; i < MAX_TXDESC_POOLS; i++) {
                tx_desc_pool = &soc->tx_desc[i];
                if (!(tx_desc_pool->pool_size) ||
                    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
                    !(tx_desc_pool->desc_pages.cacheable_pages))
                        continue;

                /*
                 * Take the flow pool lock in case the pool gets freed
                 * because all tx_desc are recycled while handling TX
                 * completion. This is not necessary for a force flush,
                 * as:
                 * a. a double lock would occur if dp_tx_desc_release is
                 *    also trying to acquire it.
                 * b. the dp interrupt has been disabled before the force
                 *    TX desc flush in dp_pdev_deinit().
                 */
                if (!force_free)
                        qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

                num_desc = tx_desc_pool->pool_size;
                num_desc_per_page =
                        tx_desc_pool->desc_pages.num_element_per_page;
                for (j = 0; j < num_desc; j++) {
                        page_id = j / num_desc_per_page;
                        offset = j % num_desc_per_page;

                        if (qdf_unlikely(!(tx_desc_pool->
                                         desc_pages.cacheable_pages)))
                                break;

                        tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
                        if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
                                /*
                                 * Free the TX desc if a force free is
                                 * required, otherwise only reset the
                                 * vdev in this TX desc.
                                 */
                                if (force_free) {
                                        dp_tx_comp_free_buf(soc, tx_desc);
                                        dp_tx_desc_release(tx_desc, i);
                                } else {
                                        tx_desc->vdev = NULL;
                                }
                        }
                }
                if (!force_free)
                        qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
        }
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                      uint8_t desc_pool_id)
{
        TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

        tx_desc->vdev = NULL;

        TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
static void dp_tx_desc_flush(struct dp_pdev *pdev,
                             struct dp_vdev *vdev,
                             bool force_free)
{
        uint8_t i, num_pool;
        uint32_t j;
        uint32_t num_desc, page_id, offset;
        uint16_t num_desc_per_page;
        struct dp_soc *soc = pdev->soc;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

        if (!vdev && !force_free) {
                dp_err("Reset TX desc vdev, Vdev param is required!");
                return;
        }

        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        for (i = 0; i < num_pool; i++) {
                tx_desc_pool = &soc->tx_desc[i];
                if (!tx_desc_pool->desc_pages.cacheable_pages)
                        continue;

                num_desc_per_page =
                        tx_desc_pool->desc_pages.num_element_per_page;
                for (j = 0; j < num_desc; j++) {
                        page_id = j / num_desc_per_page;
                        offset = j % num_desc_per_page;
                        tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

                        if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
                                if (force_free) {
                                        dp_tx_comp_free_buf(soc, tx_desc);
                                        dp_tx_desc_release(tx_desc, i);
                                } else {
                                        dp_tx_desc_reset_vdev(soc, tx_desc,
                                                              i);
                                }
                        }
                }
        }
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
        struct dp_pdev *pdev = vdev->pdev;

        /* Reset TX desc associated to this Vdev as NULL */
        dp_tx_desc_flush(pdev, vdev, false);
        dp_tx_vdev_multipass_deinit(vdev);

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
        struct dp_soc *soc = pdev->soc;

        /* Initialize Flow control counters */
        qdf_atomic_init(&pdev->num_tx_exception);
        qdf_atomic_init(&pdev->num_tx_outstanding);

        if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
                /* Initialize descriptors in TCL Ring */
                hal_tx_init_data_ring(soc->hal_soc,
                                soc->tcl_data_ring[pdev->pdev_id].hal_srng);
        }

        return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
        /* flush TX outstanding data per pdev */
        dp_tx_desc_flush(pdev, NULL, true);
        dp_tx_me_exit(pdev);

        return QDF_STATUS_SUCCESS;
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
                                    int num_desc)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++) {
                qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
                soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
        }

        return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++)
                qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
                                    int num_desc)
{
        uint8_t i;

        /* Allocate software Tx descriptor pools */
        for (i = 0; i < num_pool; i++) {
                if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "%s Tx Desc Pool alloc %d failed %pK",
                                  __func__, i, soc);
                        return ENOMEM;
                }
        }

        return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++) {
                qdf_assert_always(!soc->tx_desc[i].num_allocated);
                if (dp_tx_desc_pool_free(soc, i)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "%s Tx Desc Pool Free failed", __func__);
                }
        }
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_tso_attach_wifi3() - TSO attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
        return dp_tso_soc_attach(txrx_soc);
}

/**
 * dp_tso_detach_wifi3() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
{
        return dp_tso_soc_detach(txrx_soc);
}
#else
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
        return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
{
        return QDF_STATUS_SUCCESS;
}
#endif
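
/**
 * dp_tso_soc_detach() - TSO detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate the TSO descriptor and TSO num-segment pools
 * for every tx descriptor pool
 *
 * Return: QDF_STATUS_SUCCESS
 */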
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
        struct dp_soc *soc = (struct dp_soc *)txrx_soc;
        uint8_t i;
        uint8_t num_pool;
        uint32_t num_desc;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

        for (i = 0; i < num_pool; i++)
                dp_tx_tso_desc_pool_free(soc, i);

        dp_info("%s TSO Desc Pool %d Free descs = %d",
                __func__, num_pool, num_desc);

        for (i = 0; i < num_pool; i++)
                dp_tx_tso_num_seg_pool_free(soc, i);

        dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
                __func__, num_pool, num_desc);

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
        struct dp_soc *soc = (struct dp_soc *)txrx_soc;
        uint8_t i;
        uint8_t num_pool;
        uint32_t num_desc;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

        for (i = 0; i < num_pool; i++) {
                if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
                        dp_err("TSO Desc Pool alloc %d failed %pK",
                               i, soc);
                        return QDF_STATUS_E_FAILURE;
                }
        }

        dp_info("%s TSO Desc Alloc %d, descs = %d",
                __func__, num_pool, num_desc);

        for (i = 0; i < num_pool; i++) {
                if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
                        dp_err("TSO Num of seg Pool alloc %d failed %pK",
                               i, soc);
                        return QDF_STATUS_E_FAILURE;
                }
        }

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_soc_detach() - detach soc from dp tx
 * @soc: core txrx main context
 *
 * This function detaches dp tx from the main device context;
 * it frees the dp tx resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
{
        uint8_t num_pool;
        uint16_t num_desc;
        uint16_t num_ext_desc;
        uint8_t i;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

        dp_tx_flow_control_deinit(soc);
        dp_tx_delete_static_pools(soc, num_pool);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s Tx Desc Pool Free num_pool = %d, descs = %d",
                  __func__, num_pool, num_desc);

        for (i = 0; i < num_pool; i++) {
                if (dp_tx_ext_desc_pool_free(soc, i)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "%s Tx Ext Desc Pool Free failed",
                                  __func__);
                        return QDF_STATUS_E_RESOURCES;
                }
        }

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s MSDU Ext Desc Pool %d Free descs = %d",
                  __func__, num_pool, num_ext_desc);

        status = dp_tso_detach_wifi3(soc);
        if (status != QDF_STATUS_SUCCESS)
                return status;

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_soc_attach() - attach soc to dp tx
 * @soc: core txrx main context
 *
 * This function attaches dp tx to the main device context;
 * it allocates the dp tx resources and initializes them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
{
        uint8_t i;
        uint8_t num_pool;
        uint32_t num_desc;
        uint32_t num_ext_desc;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s Tx Desc Alloc num_pool = %d, descs = %d",
                  __func__, num_pool, num_desc);

        if ((num_pool > MAX_TXDESC_POOLS) ||
            (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
                goto fail;

        if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
                goto fail;

        dp_tx_flow_control_init(soc);

        /* Allocate extension tx descriptor pools */
        for (i = 0; i < num_pool; i++) {
                if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  "MSDU Ext Desc Pool alloc %d failed %pK",
                                  i, soc);
                        goto fail;
                }
        }

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s MSDU Ext Desc Alloc %d, descs = %d",
                  __func__, num_pool, num_ext_desc);

        status = dp_tso_attach_wifi3((void *)soc);
        if (status != QDF_STATUS_SUCCESS)
                goto fail;
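
        /*
         * Note: the IPA-reserved TCL ring (IPA_TCL_DATA_RING_IDX) is
         * presumably not covered by the num_tcl_data_rings loop below,
         * which would be why it needs an explicit init when IPA is
         * enabled.
         */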
        /* Initialize descriptors in TCL Rings */
        if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
                for (i = 0; i < soc->num_tcl_data_rings; i++) {
                        hal_tx_init_data_ring(soc->hal_soc,
                                              soc->tcl_data_ring[i].hal_srng);
                }
                if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
                        hal_tx_init_data_ring(soc->hal_soc,
                                soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
        }

        /*
         * Initialize command/credit ring descriptor
         * Command/CREDIT ring also used for sending DATA cmds
         */
        hal_tx_init_cmd_credit_ring(soc->hal_soc,
                                    soc->tcl_cmd_credit_ring.hal_srng);

        /*
         * todo: add a runtime config option to enable this.
         *
         * Due to multiple issues on NPR EMU, this is enabled
         * selectively for NPR EMU only; it should be removed once the
         * NPR platforms are stable.
         */
        soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s HAL Tx init Success", __func__);

        return QDF_STATUS_SUCCESS;

fail:
        /* Detach will take care of freeing only allocated resources */
        dp_tx_soc_detach(soc);
        return QDF_STATUS_E_RESOURCES;
}