dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
    (DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
    HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
    HAL_TX_ENCRYPT_TYPE_WEP_128,
    HAL_TX_ENCRYPT_TYPE_WEP_104,
    HAL_TX_ENCRYPT_TYPE_WEP_40,
    HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
    HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
    HAL_TX_ENCRYPT_TYPE_WAPI,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
    HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
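
/*
 * Illustrative sketch, not part of the original driver: sec_type_map is
 * intended to be indexed directly by a cdp_sec_type value to obtain the
 * matching HAL encrypt type. The helper below is an assumption-labeled
 * example, compiled out behind a hypothetical guard.
 */
#ifdef DP_TX_DOC_EXAMPLES /* hypothetical guard; example only */
static inline uint8_t dp_tx_sec_type_to_hal(uint8_t cdp_sec)
{
    /* Fall back to no-cipher for out-of-range input (assumption) */
    if (cdp_sec >= MAX_CDP_SEC_TYPE)
        return HAL_TX_ENCRYPT_TYPE_NO_CIPHER;
    return sec_type_map[cdp_sec];
}
#endif /* DP_TX_DOC_EXAMPLES */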
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the outstanding tx desc count and its
 *                           peak value on the pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
    int32_t tx_descs_cnt =
        qdf_atomic_read(&pdev->num_tx_outstanding);

    if (pdev->tx_descs_max < tx_descs_cnt)
        pdev->tx_descs_max = tx_descs_cnt;

    qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
                               pdev->tx_descs_max);
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * the soc max limit or the pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_outstanding) >=
            soc->num_tx_allowed) {
        dp_tx_info("queued packets are more than max tx, drop the frame");
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
            pdev->num_tx_allowed) {
        dp_tx_info("queued packets are more than max tx, drop the frame");
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
        return true;
    }
    return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_exception) >=
            soc->num_msdu_exception_desc) {
        dp_info("exc packets are more than max, drop the exc pkt");
        DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
        return true;
    }
    return false;
}

/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_inc(&pdev->num_tx_outstanding);
    qdf_atomic_inc(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_dec(&pdev->num_tx_outstanding);
    qdf_atomic_dec(&soc->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#else /* QCA_TX_LIMIT_CHECK */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    qdf_atomic_inc(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    qdf_atomic_dec(&pdev->num_tx_outstanding);
    dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_TX_LIMIT_CHECK */
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    TSO_DEBUG("%s: Unmap the tso segment", __func__);
    if (qdf_unlikely(!seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else {
        bool is_last_seg;

        /* no tso segment left to do dma unmap */
        if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
            return;

        is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
                      true : false;
        qdf_nbuf_unmap_tso_segment(soc->osdev,
                                   seg_desc, is_last_seg);
        num_seg_desc->num_seg.tso_cmn_num_seg--;
    }
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(!tx_desc->tso_desc)) {
        dp_tx_err("TSO desc is NULL!");
        qdf_assert(0);
    } else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
        dp_tx_err("TSO num desc is NULL!");
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

        /* Add the tso num segment into the free list */
        if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                                tx_desc->tso_num_desc);
            tx_desc->tso_num_desc = NULL;
            DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
        }

        /* Add the tso segment into the free list */
        dp_tx_tso_desc_free(soc,
                            tx_desc->pool_id, tx_desc->tso_desc);
        tx_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: none
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    dp_tx_outstanding_dec(pdev);

    if (tx_desc->frm_type == dp_tx_frm_tso)
        dp_tx_tso_desc_release(soc, tx_desc);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
        dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&soc->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
        hal_tx_comp_get_buffer_source(&tx_desc->comp))
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
                                                     soc->hal_soc);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
                tx_desc->id, comp_status,
                qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t *meta_data = msdu_info->meta_data;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)meta_data;

    uint8_t htt_desc_size;

    /* Size rounded up to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

    if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
        HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
                                                       meta_data[0])) {
        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
                         htt_desc_size_aligned)) {
            nbuf = qdf_nbuf_realloc_headroom(nbuf,
                                             htt_desc_size_aligned);
            if (!nbuf) {
                /*
                 * qdf_nbuf_realloc_headroom won't do skb_clone
                 * as skb_realloc_headroom does. so, no free is
                 * needed here.
                 */
                DP_STATS_INC(vdev,
                             tx_i.dropped.headroom_insufficient,
                             1);
                qdf_print(" %s[%d] skb_realloc_headroom failed",
                          __func__, __LINE__);
                return 0;
            }
        }
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (!hdr) {
            dp_tx_err("Error in filling HTT metadata");
            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
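
/*
 * Illustrative sketch, not part of the original driver: the
 * "(size + 7) & ~0x7" expression above rounds a size up to the next
 * multiple of 8, e.g. 24 -> 24, 25 -> 32, 26 -> 32. Compiled out behind a
 * hypothetical guard.
 */
#ifdef DP_TX_DOC_EXAMPLES /* hypothetical guard; example only */
static inline uint8_t dp_tx_align8(uint8_t size)
{
    /* Add 7, then clear the low 3 bits to round up to 8 bytes */
    return (size + 7) & ~0x7;
}
#endif /* DP_TX_DOC_EXAMPLES */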
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *)&tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
                                    tso_seg->tso_flags.ip_len);
    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
                          (tso_seg->tso_frags[num_frag].length));

        qdf_dmaaddr_to_32s(
            tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                                   tso_seg->tso_frags[num_frag].length);
    }
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_num_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_num_seg;

    while (free_num_seg) {
        next_num_seg = free_num_seg->next;
        dp_tso_num_seg_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_num_seg);
        free_num_seg = next_num_seg;
    }
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    struct qdf_tso_seg_elem_t *next_seg;

    if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "TSO number seg desc is NULL!");
        return;
    }

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
        free_seg = next_seg;
    }
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    uint32_t stats_idx;

    stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
                 % CDP_MAX_TSO_PACKETS);
    return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    return 0;
}
#endif /* FEATURE_TSO_STATS */
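
/*
 * Descriptive note (not in the original source): dp_tso_get_stats_idx()
 * implements a simple wrapping counter - an atomic increment folded into
 * [0, CDP_MAX_TSO_PACKETS) with a modulo - so concurrent callers each get
 * a distinct slot in the fixed-size TSO stats ring and old entries are
 * overwritten once the ring wraps.
 */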
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
                                          struct dp_tx_msdu_info_s *msdu_info,
                                          bool tso_seg_unmap)
{
    struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
    struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
    struct qdf_tso_num_seg_elem_t *tso_num_desc =
        tso_info->tso_num_seg_list;

    /* do dma unmap for each segment */
    if (tso_seg_unmap)
        dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

    /* free all tso num seg descriptors (typically there is only one) */
    dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

    /* free all tso segment descriptors */
    dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct dp_pdev *pdev = vdev->pdev;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            dp_err_rl("Failed to alloc tso seg desc");
            DP_STATS_INC_PKT(vdev->pdev,
                             tso_stats.tso_no_mem_dropped, 1,
                             qdf_nbuf_len(msdu));
            dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
                                       msdu_info->tx_queue.desc_pool_id);

    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
                 __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
              msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        /*
         * Free allocated TSO seg desc and number seg desc,
         * do unmap for segments if dma map has done.
         */
        DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
        return QDF_STATUS_E_INVAL;
    }

    tso_info->curr_seg = tso_info->tso_seg_list;

    tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
    dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
                         msdu, msdu_info->num_seg);
    dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
                                tso_info->msdu_stats_idx);
    dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
                        (DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
                         sizeof(struct htt_tx_msdu_desc_ext2_t)));
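
/*
 * Descriptive note (not in the original source): the compile-time assert
 * above guarantees that the meta_data[] scratch area in dp_tx_msdu_info_s
 * (DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit words, hence the "* 4" bytes)
 * is large enough to hold struct htt_tx_msdu_desc_ext2_t, which the HTT
 * metadata paths above copy into it verbatim.
 */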
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
        qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                     &msdu_info->meta_data[0],
                     sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&soc->num_tx_exception);
        msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;
        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                                       seg_info->frags[i].paddr_lo,
                                       seg_info->frags[i].paddr_hi,
                                       seg_info->frags[i].len);
        }
        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                                   &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                       cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
                         msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
    QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
    QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
    DPTRACE(qdf_dp_trace_ptr(skb,
                             QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             msdu_id, vdev_id, 0));

    qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

    DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                  QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
                                  msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Returns: 1 if the packet is marked as exception,
 *          0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
                                                 qdf_nbuf_t nbuf)
{
    return 0;
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: msdu info carrying the metadata for the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        struct dp_tx_msdu_info_s *msdu_info,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

    if (qdf_unlikely(!tx_desc)) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = ((tx_exc_metadata &&
        (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
        tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
    tx_desc->vdev_id = vdev->vdev_id;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;
    tx_desc->pkt_offset = 0;
    tx_desc->length = qdf_nbuf_headlen(nbuf);

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    if (qdf_unlikely(vdev->multipass_en)) {
        if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
            goto failure;
    }

    /* Packets marked by upper layer (OS-IF) to be sent to FW */
    if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
        is_exception = 1;

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HW requirement is that metadata should always point to a
     * 8-byte aligned address. So we add alignment pad to start of buffer.
     * HTT Metadata should be ensured to be multiple of 8-bytes,
     *  to get 8-byte aligned start address along with align_pad added
     *
     *  |-----------------------------|
     *  |                             |
     *  |-----------------------------| <-----Buffer Pointer Address given
     *  |                             |  ^    in HW descriptor (aligned)
     *  |        HTT Metadata         |  |
     *  |                             |  |
     *  |                             |  | Packet Offset given in descriptor
     *  |                             |  |
     *  |-----------------------------|  |
     *  |        Alignment Pad        |  v
     *  |-----------------------------| <----- Actual buffer start address
     *  |         SKB Data            |        (Unaligned)
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |-----------------------------|
     */
    if (qdf_unlikely((msdu_info->exception_fw)) ||
        (vdev->opmode == wlan_op_mode_ocb) ||
        (tx_exc_metadata &&
         tx_exc_metadata->is_tx_sniffer)) {
        align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
            DP_STATS_INC(vdev,
                         tx_i.dropped.headroom_insufficient, 1);
            goto failure;
        }

        if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
            dp_tx_err("qdf_nbuf_push_head failed");
            goto failure;
        }

        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                                                  msdu_info);
        if (htt_hdr_size == 0)
            goto failure;

        tx_desc->length = qdf_nbuf_headlen(nbuf);
        tx_desc->pkt_offset = align_pad + htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        is_exception = 1;
        tx_desc->length -= tx_desc->pkt_offset;
    }

#if !TQM_BYPASS_WAR
    if (is_exception || tx_exc_metadata)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&soc->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id : Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
        uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = msdu_info->frm_type;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev_id = vdev->vdev_id;
    tx_desc->pdev = pdev;
    tx_desc->pkt_offset = 0;
    tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
    tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    /* Handle scattered frames - TSO/SG/ME */
    /* Allocate and prepare an extension descriptor for scattered frames */
    msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
    if (!msdu_ext_desc) {
        dp_tx_info("Tx Extension Descriptor Alloc Fail");
        goto failure;
    }

#if TQM_BYPASS_WAR
    /* Temporary WAR due to TQM VP issues */
    tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
    qdf_atomic_inc(&soc->num_tx_exception);
#endif
    if (qdf_unlikely(msdu_info->exception_fw))
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

    tx_desc->msdu_ext_desc = msdu_ext_desc;
    tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

    tx_desc->dma_addr = msdu_ext_desc->paddr;

    if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
        tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
    else
        tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
    struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    qdf_nbuf_t curr_nbuf = NULL;
    uint16_t total_len = 0;
    qdf_dma_addr_t paddr;
    int32_t i;
    int32_t mapped_buf_num = 0;

    struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
    qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

    DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

    /* Continue only if frames are of DATA type */
    if (!DP_FRAME_IS_DATA(qos_wh)) {
        DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
        dp_tx_debug("Pkt recd is not of data type");
        goto error;
    }

    /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
    if (vdev->raw_mode_war &&
        (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
        (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
        qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

    for (curr_nbuf = nbuf, i = 0; curr_nbuf;
         curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
        /*
         * Number of nbuf's must not exceed the size of the frags
         * array in seg_info.
         */
        if (i >= DP_TX_MAX_NUM_FRAGS) {
            dp_err_rl("nbuf cnt exceeds the max number of segs");
            DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
            goto error;
        }
        if (QDF_STATUS_SUCCESS !=
            qdf_nbuf_map_nbytes_single(vdev->osdev,
                                       curr_nbuf,
                                       QDF_DMA_TO_DEVICE,
                                       curr_nbuf->len)) {
            dp_tx_err("%s dma map error ", __func__);
            DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
            goto error;
        }
        /* Update the count of mapped nbuf's */
        mapped_buf_num++;
        paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
        seg_info->frags[i].paddr_lo = paddr;
        seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
        seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
        seg_info->frags[i].vaddr = (void *)curr_nbuf;

        total_len += qdf_nbuf_len(curr_nbuf);
    }

    seg_info->frag_cnt = i;
    seg_info->total_len = total_len;
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_raw;
    msdu_info->num_seg = 1;

    return nbuf;

error:
    i = 0;
    while (nbuf) {
        curr_nbuf = nbuf;
        if (i < mapped_buf_num) {
            qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
                                         QDF_DMA_TO_DEVICE,
                                         curr_nbuf->len);
            i++;
        }
        nbuf = qdf_nbuf_next(nbuf);
        qdf_nbuf_free(curr_nbuf);
    }
    return NULL;
}
  1011. /**
  1012. * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
  1013. * @soc: DP soc handle
  1014. * @nbuf: Buffer pointer
  1015. *
  1016. * unmap the chain of nbufs that belong to this RAW frame.
  1017. *
  1018. * Return: None
  1019. */
  1020. static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
  1021. qdf_nbuf_t nbuf)
  1022. {
  1023. qdf_nbuf_t cur_nbuf = nbuf;
  1024. do {
  1025. qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
  1026. QDF_DMA_TO_DEVICE,
  1027. cur_nbuf->len);
  1028. cur_nbuf = qdf_nbuf_next(cur_nbuf);
  1029. } while (cur_nbuf);
  1030. }
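/*
 * Illustrative usage (hypothetical caller, not part of this file): if a
 * RAW chain was mapped by dp_tx_prepare_raw() but a later step fails
 * before enqueue, the whole chain can be undone in one call:
 *
 *     dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
 */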
  1031. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1032. #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
  1033. { \
  1034. qdf_nbuf_t nbuf_local; \
  1035. struct dp_vdev *vdev_local = vdev_hdl; \
  1036. do { \
  1037. if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
  1038. break; \
  1039. nbuf_local = nbuf; \
  1040. if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
  1041. htt_cmn_pkt_type_raw)) \
  1042. break; \
  1043. else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
  1044. break; \
  1045. else if (qdf_nbuf_is_tso((nbuf_local))) \
  1046. break; \
  1047. dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
  1048. (nbuf_local), \
  1049. NULL, 1, 0); \
  1050. } while (0); \
  1051. }
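/*
 * Note: the inner do { } while (0) in the macro above exists so that
 * "break" can be used for the early-exit checks; the construct never
 * actually loops.
 */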
  1052. #else
  1053. #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
  1054. #endif
  1055. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1056. /**
  1057. * dp_tx_update_stats() - Update soc level tx stats
  1058. * @soc: DP soc handle
  1059. * @nbuf: packet being transmitted
  1060. *
  1061. * Returns: none
  1062. */
  1063. static inline void dp_tx_update_stats(struct dp_soc *soc,
  1064. qdf_nbuf_t nbuf)
  1065. {
  1066. DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
  1067. }
  1068. /**
  1069. * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
* @soc: Datapath soc handle
* @vdev: DP vdev handle
  1071. * @tx_desc: tx packet descriptor
  1072. * @tid: TID for pkt transmission
  1073. *
  1074. * Returns: 1, if coalescing is to be done
  1075. * 0, if coalescing is not to be done
  1076. */
  1077. static inline int
  1078. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1079. struct dp_tx_desc_s *tx_desc,
  1080. uint8_t tid)
  1081. {
  1082. struct dp_swlm *swlm = &soc->swlm;
  1083. union swlm_data swlm_query_data;
  1084. struct dp_swlm_tcl_data tcl_data;
  1085. QDF_STATUS status;
  1086. int ret;
  1087. if (qdf_unlikely(!swlm->is_enabled))
  1088. return 0;
  1089. tcl_data.nbuf = tx_desc->nbuf;
  1090. tcl_data.tid = tid;
  1091. tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
  1092. swlm_query_data.tcl_data = &tcl_data;
  1093. status = dp_swlm_tcl_pre_check(soc, &tcl_data);
  1094. if (QDF_IS_STATUS_ERROR(status)) {
  1095. dp_swlm_tcl_reset_session_data(soc);
  1096. DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
  1097. return 0;
  1098. }
  1099. ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
  1100. if (ret) {
  1101. DP_STATS_INC(swlm, tcl.coalesce_success, 1);
  1102. } else {
  1103. DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
  1104. }
  1105. return ret;
  1106. }
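/*
 * Summary of the coalescing decision above: the SWLM pre-check
 * validates the session data (resetting it on failure, in which case
 * the write is not coalesced); the TCL_DATA policy query then decides
 * whether this descriptor's head-pointer update can be deferred
 * (return 1) or must be issued immediately (return 0).
 */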
  1107. /**
  1108. * dp_tx_ring_access_end() - HAL ring access end for data transmission
  1109. * @soc: Datapath soc handle
  1110. * @hal_ring_hdl: HAL ring handle
  1111. * @coalesce: Coalesce the current write or not
  1112. *
  1113. * Returns: none
  1114. */
  1115. static inline void
  1116. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1117. int coalesce)
  1118. {
  1119. if (coalesce)
  1120. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1121. else
  1122. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1123. }
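/*
 * Note: on the coalesced path, dp_tx_hal_ring_access_end_reap() ends
 * the ring access without writing the TCL head-pointer register, so
 * the register write is batched with a later descriptor; the
 * non-coalesced path updates the HW head pointer immediately.
 */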
  1124. #else
  1125. static inline void dp_tx_update_stats(struct dp_soc *soc,
  1126. qdf_nbuf_t nbuf)
  1127. {
  1128. }
  1129. static inline int
  1130. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1131. struct dp_tx_desc_s *tx_desc,
  1132. uint8_t tid)
  1133. {
  1134. return 0;
  1135. }
  1136. static inline void
  1137. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1138. int coalesce)
  1139. {
  1140. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1141. }
  1142. #endif
  1143. #ifdef FEATURE_RUNTIME_PM
  1144. /**
  1145. * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
  1146. * @soc: Datapath soc handle
  1147. * @hal_ring_hdl: HAL ring handle
  1148. * @coalesce: Coalesce the current write or not
  1149. *
  1150. * Wrapper for HAL ring access end for data transmission for
  1151. * FEATURE_RUNTIME_PM
  1152. *
  1153. * Returns: none
  1154. */
  1155. static inline void
  1156. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1157. hal_ring_handle_t hal_ring_hdl,
  1158. int coalesce)
  1159. {
  1160. int ret;
  1161. ret = hif_pm_runtime_get(soc->hif_handle,
  1162. RTPM_ID_DW_TX_HW_ENQUEUE, true);
  1163. switch (ret) {
  1164. case 0:
  1165. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1166. hif_pm_runtime_put(soc->hif_handle,
  1167. RTPM_ID_DW_TX_HW_ENQUEUE);
  1168. break;
  1169. /*
  1170. * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
  1171. * take the dp runtime refcount using dp_runtime_get,
1172. * check link state; if up, write the TX ring HP, else just set the flush event.
  1173. * In dp_runtime_resume, wait until dp runtime refcount becomes
  1174. * zero or time out, then flush pending tx.
  1175. */
  1176. case -EBUSY:
  1177. case -EINPROGRESS:
  1178. dp_runtime_get(soc);
  1179. if (hif_pm_get_link_state(soc->hif_handle) ==
  1180. HIF_PM_LINK_STATE_UP) {
  1181. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1182. } else {
  1183. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1184. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1185. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1186. }
  1187. dp_runtime_put(soc);
  1188. break;
  1189. default:
  1190. dp_runtime_get(soc);
  1191. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1192. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1193. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1194. dp_runtime_put(soc);
  1195. }
  1196. }
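/*
 * Runtime-PM outcomes handled above, in brief:
 *  - 0: device is active; do the normal ring access end and drop the
 *    runtime-PM reference.
 *  - -EBUSY/-EINPROGRESS: resume is pending; write the TX ring HP only
 *    if the link is already up, else record a flush event so that
 *    dp_runtime_resume() can flush the pending TX later.
 *  - any other error: do not touch the HW; reap, set the flush event
 *    and leave the flush to the resume path.
 */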
  1197. #else
  1198. static inline void
  1199. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1200. hal_ring_handle_t hal_ring_hdl,
  1201. int coalesce)
  1202. {
  1203. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1204. }
  1205. #endif
  1206. /**
  1207. * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
  1208. * @soc: DP Soc Handle
  1209. * @vdev: DP vdev handle
  1210. * @tx_desc: Tx Descriptor Handle
* @fw_metadata: Metadata to send to Target Firmware along with frame
* @tx_exc_metadata: Handle that holds exception path meta data
* @msdu_info: MSDU info that carries the TID and the TX queue/ring to use
  1215. *
  1216. * Gets the next free TCL HW DMA descriptor and sets up required parameters
  1217. * from software Tx descriptor
  1218. *
  1219. * Return: QDF_STATUS_SUCCESS: success
  1220. * QDF_STATUS_E_RESOURCES: Error return
  1221. */
  1222. static QDF_STATUS
  1223. dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
  1224. struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
  1225. struct cdp_tx_exception_metadata *tx_exc_metadata,
  1226. struct dp_tx_msdu_info_s *msdu_info)
  1227. {
  1228. void *hal_tx_desc;
  1229. uint32_t *hal_tx_desc_cached;
  1230. int coalesce = 0;
  1231. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  1232. uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
  1233. uint8_t tid = msdu_info->tid;
  1234. /*
1235. * Initialize the descriptor statically here to avoid
1236. * the function-call overhead of a memset/qdf_mem_set jump
  1237. */
  1238. uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
  1239. enum cdp_sec_type sec_type = ((tx_exc_metadata &&
  1240. tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
  1241. tx_exc_metadata->sec_type : vdev->sec_type);
  1242. /* Return Buffer Manager ID */
  1243. uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
  1244. hal_ring_handle_t hal_ring_hdl = NULL;
  1245. QDF_STATUS status = QDF_STATUS_E_RESOURCES;
  1246. if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
  1247. dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
  1248. return QDF_STATUS_E_RESOURCES;
  1249. }
  1250. hal_tx_desc_cached = (void *) cached_desc;
  1251. hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
  1252. tx_desc->dma_addr, bm_id, tx_desc->id,
  1253. (tx_desc->flags & DP_TX_DESC_FLAG_FRAG));
  1254. hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
  1255. vdev->lmac_id);
  1256. hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
  1257. vdev->search_type);
  1258. hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
  1259. vdev->bss_ast_idx);
  1260. hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
  1261. vdev->dscp_tid_map_id);
  1262. hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
  1263. sec_type_map[sec_type]);
  1264. hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
  1265. (vdev->bss_ast_hash & 0xF));
  1266. hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
  1267. hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
  1268. hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
  1269. hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
  1270. hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
  1271. vdev->hal_desc_addr_search_flags);
  1272. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
  1273. hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1274. /* verify checksum offload configuration */
  1275. if (vdev->csum_enabled &&
  1276. ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
  1277. || qdf_nbuf_is_tso(tx_desc->nbuf))) {
  1278. hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
  1279. hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
  1280. }
  1281. if (tid != HTT_TX_EXT_TID_INVALID)
  1282. hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
  1283. if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
  1284. hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
  1285. if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
  1286. qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
  1287. soc->wlan_cfg_ctx)))
  1288. tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  1289. dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
  1290. tx_desc->length,
  1291. (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
  1292. (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
  1293. tx_desc->id);
  1294. hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
  1295. if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
  1296. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1297. "%s %d : HAL RING Access Failed -- %pK",
  1298. __func__, __LINE__, hal_ring_hdl);
  1299. DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
  1300. DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
  1301. return status;
  1302. }
  1303. /* Sync cached descriptor with HW */
  1304. hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
  1305. if (qdf_unlikely(!hal_tx_desc)) {
  1306. dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
  1307. DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
  1308. DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
  1309. goto ring_access_fail;
  1310. }
  1311. tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
  1312. dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
  1313. hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
  1314. coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
  1315. DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
  1316. dp_tx_update_stats(soc, tx_desc->nbuf);
  1317. status = QDF_STATUS_SUCCESS;
  1318. ring_access_fail:
  1319. dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
  1320. return status;
  1321. }
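/*
 * Enqueue sequence used above: build the descriptor in a zeroed cached
 * copy, reserve a TCL SRNG entry, hal_tx_desc_sync() the cached copy
 * into the ring, then either update the head pointer now or leave it
 * for a coalesced update (see dp_tx_attempt_coalescing()).
 */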
  1322. /**
  1323. * dp_cce_classify() - Classify the frame based on CCE rules
  1324. * @vdev: DP vdev handle
  1325. * @nbuf: skb
  1326. *
  1327. * Classify frames based on CCE rules
1328. * Return: true if classified,
1329. * else false
  1330. */
  1331. static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1332. {
  1333. qdf_ether_header_t *eh = NULL;
  1334. uint16_t ether_type;
  1335. qdf_llc_t *llcHdr;
  1336. qdf_nbuf_t nbuf_clone = NULL;
  1337. qdf_dot3_qosframe_t *qos_wh = NULL;
  1338. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1339. /*
  1340. * In case of mesh packets or hlos tid override enabled,
  1341. * don't do any classification
  1342. */
  1343. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1344. & DP_TX_SKIP_CCE_CLASSIFY))
  1345. return false;
  1346. }
  1347. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1348. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  1349. ether_type = eh->ether_type;
  1350. llcHdr = (qdf_llc_t *)(nbuf->data +
  1351. sizeof(qdf_ether_header_t));
  1352. } else {
  1353. qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1354. /* For encrypted packets don't do any classification */
  1355. if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
  1356. return false;
  1357. if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
  1358. if (qdf_unlikely(
  1359. qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
  1360. qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
  1361. ether_type = *(uint16_t *)(nbuf->data
  1362. + QDF_IEEE80211_4ADDR_HDR_LEN
  1363. + sizeof(qdf_llc_t)
  1364. - sizeof(ether_type));
  1365. llcHdr = (qdf_llc_t *)(nbuf->data +
  1366. QDF_IEEE80211_4ADDR_HDR_LEN);
  1367. } else {
  1368. ether_type = *(uint16_t *)(nbuf->data
  1369. + QDF_IEEE80211_3ADDR_HDR_LEN
  1370. + sizeof(qdf_llc_t)
  1371. - sizeof(ether_type));
  1372. llcHdr = (qdf_llc_t *)(nbuf->data +
  1373. QDF_IEEE80211_3ADDR_HDR_LEN);
  1374. }
  1375. if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
  1376. && (ether_type ==
  1377. qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
  1378. DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
  1379. return true;
  1380. }
  1381. }
  1382. return false;
  1383. }
  1384. if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
  1385. ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1386. sizeof(*llcHdr));
  1387. nbuf_clone = qdf_nbuf_clone(nbuf);
  1388. if (qdf_unlikely(nbuf_clone)) {
  1389. qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
  1390. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1391. qdf_nbuf_pull_head(nbuf_clone,
  1392. sizeof(qdf_net_vlanhdr_t));
  1393. }
  1394. }
  1395. } else {
  1396. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1397. nbuf_clone = qdf_nbuf_clone(nbuf);
  1398. if (qdf_unlikely(nbuf_clone)) {
  1399. qdf_nbuf_pull_head(nbuf_clone,
  1400. sizeof(qdf_net_vlanhdr_t));
  1401. }
  1402. }
  1403. }
  1404. if (qdf_unlikely(nbuf_clone))
  1405. nbuf = nbuf_clone;
  1406. if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
  1407. || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
  1408. || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
  1409. || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
  1410. || (qdf_nbuf_is_ipv4_pkt(nbuf)
  1411. && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
  1412. || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
  1413. qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
  1414. if (qdf_unlikely(nbuf_clone))
  1415. qdf_nbuf_free(nbuf_clone);
  1416. return true;
  1417. }
  1418. if (qdf_unlikely(nbuf_clone))
  1419. qdf_nbuf_free(nbuf_clone);
  1420. return false;
  1421. }
  1422. /**
  1423. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1424. * @vdev: DP vdev handle
  1425. * @nbuf: skb
  1426. *
  1427. * Extract the DSCP or PCP information from frame and map into TID value.
  1428. *
  1429. * Return: void
  1430. */
  1431. static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1432. struct dp_tx_msdu_info_s *msdu_info)
  1433. {
  1434. uint8_t tos = 0, dscp_tid_override = 0;
  1435. uint8_t *hdr_ptr, *L3datap;
  1436. uint8_t is_mcast = 0;
  1437. qdf_ether_header_t *eh = NULL;
  1438. qdf_ethervlan_header_t *evh = NULL;
  1439. uint16_t ether_type;
  1440. qdf_llc_t *llcHdr;
  1441. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  1442. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1443. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1444. eh = (qdf_ether_header_t *)nbuf->data;
  1445. hdr_ptr = (uint8_t *)(eh->ether_dhost);
  1446. L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
  1447. } else {
  1448. qdf_dot3_qosframe_t *qos_wh =
  1449. (qdf_dot3_qosframe_t *) nbuf->data;
  1450. msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
  1451. qos_wh->i_qos[0] & DP_QOS_TID : 0;
  1452. return;
  1453. }
  1454. is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
  1455. ether_type = eh->ether_type;
  1456. llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
  1457. /*
  1458. * Check if packet is dot3 or eth2 type.
  1459. */
  1460. if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
  1461. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1462. sizeof(*llcHdr));
  1463. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1464. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
  1465. sizeof(*llcHdr);
  1466. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
  1467. + sizeof(*llcHdr) +
  1468. sizeof(qdf_net_vlanhdr_t));
  1469. } else {
  1470. L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
  1471. sizeof(*llcHdr);
  1472. }
  1473. } else {
  1474. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1475. evh = (qdf_ethervlan_header_t *) eh;
  1476. ether_type = evh->ether_type;
  1477. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
  1478. }
  1479. }
  1480. /*
  1481. * Find priority from IP TOS DSCP field
  1482. */
  1483. if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
  1484. qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
  1485. if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
  1486. /* Only for unicast frames */
  1487. if (!is_mcast) {
  1488. /* send it on VO queue */
  1489. msdu_info->tid = DP_VO_TID;
  1490. }
  1491. } else {
  1492. /*
  1493. * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
  1494. * from TOS byte.
  1495. */
  1496. tos = ip->ip_tos;
  1497. dscp_tid_override = 1;
  1498. }
  1499. } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
  1500. /* TODO
  1501. * use flowlabel
1502. * igmp/mld cases to be handled in phase 2
  1503. */
  1504. unsigned long ver_pri_flowlabel;
  1505. unsigned long pri;
  1506. ver_pri_flowlabel = *(unsigned long *) L3datap;
  1507. pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
  1508. DP_IPV6_PRIORITY_SHIFT;
  1509. tos = pri;
  1510. dscp_tid_override = 1;
  1511. } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
  1512. msdu_info->tid = DP_VO_TID;
  1513. else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
  1514. /* Only for unicast frames */
  1515. if (!is_mcast) {
  1516. /* send ucast arp on VO queue */
  1517. msdu_info->tid = DP_VO_TID;
  1518. }
  1519. }
  1520. /*
  1521. * Assign all MCAST packets to BE
  1522. */
  1523. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1524. if (is_mcast) {
  1525. tos = 0;
  1526. dscp_tid_override = 1;
  1527. }
  1528. }
  1529. if (dscp_tid_override == 1) {
  1530. tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  1531. msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
  1532. }
  1533. if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
  1534. msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
  1535. return;
  1536. }
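/*
 * Worked DSCP example (a sketch assuming DP_IP_DSCP_SHIFT is 2 and
 * DP_IP_DSCP_MASK is 0x3f): an IPv4 TOS byte of 0xb8 (EF traffic)
 * yields dscp = (0xb8 >> 2) & 0x3f = 46, and the TID comes from
 * pdev->dscp_tid_map[vdev->dscp_tid_map_id][46].
 */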
  1537. /**
  1538. * dp_tx_classify_tid() - Obtain TID to be used for this frame
  1539. * @vdev: DP vdev handle
  1540. * @nbuf: skb
  1541. *
  1542. * Software based TID classification is required when more than 2 DSCP-TID
  1543. * mapping tables are needed.
  1544. * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
  1545. *
  1546. * Return: void
  1547. */
  1548. static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1549. struct dp_tx_msdu_info_s *msdu_info)
  1550. {
  1551. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1552. /*
1553. * The skip_sw_tid_classification flag will be set in the below cases:
  1554. * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
  1555. * 2. hlos_tid_override enabled for vdev
  1556. * 3. mesh mode enabled for vdev
  1557. */
  1558. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1559. /* Update tid in msdu_info from skb priority */
  1560. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1561. & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
  1562. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  1563. return;
  1564. }
  1565. return;
  1566. }
  1567. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1568. }
  1569. #ifdef FEATURE_WLAN_TDLS
  1570. /**
  1571. * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
  1572. * @soc: datapath SOC
  1573. * @vdev: datapath vdev
  1574. * @tx_desc: TX descriptor
  1575. *
  1576. * Return: None
  1577. */
  1578. static void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1579. struct dp_vdev *vdev,
  1580. struct dp_tx_desc_s *tx_desc)
  1581. {
  1582. if (vdev) {
  1583. if (vdev->is_tdls_frame) {
  1584. tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
  1585. vdev->is_tdls_frame = false;
  1586. }
  1587. }
  1588. }
  1589. /**
  1590. * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
  1591. * @soc: dp_soc handle
  1592. * @tx_desc: TX descriptor
  1594. *
  1595. * Return: None
  1596. */
  1597. static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
  1598. struct dp_tx_desc_s *tx_desc)
  1599. {
  1600. struct hal_tx_completion_status ts = {0};
  1601. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1602. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1603. DP_MOD_ID_TDLS);
  1604. if (qdf_unlikely(!vdev)) {
  1605. dp_err_rl("vdev is null!");
  1606. goto error;
  1607. }
  1608. hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
  1609. if (vdev->tx_non_std_data_callback.func) {
  1610. qdf_nbuf_set_next(nbuf, NULL);
  1611. vdev->tx_non_std_data_callback.func(
  1612. vdev->tx_non_std_data_callback.ctxt,
  1613. nbuf, ts.status);
  1614. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1615. return;
  1616. } else {
  1617. dp_err_rl("callback func is null");
  1618. }
  1619. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1620. error:
  1621. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
  1622. qdf_nbuf_free(nbuf);
  1623. }
  1624. /**
  1625. * dp_tx_msdu_single_map() - do nbuf map
  1626. * @vdev: DP vdev handle
  1627. * @tx_desc: DP TX descriptor pointer
  1628. * @nbuf: skb pointer
  1629. *
  1630. * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
  1631. * operation done in other component.
  1632. *
  1633. * Return: QDF_STATUS
  1634. */
  1635. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1636. struct dp_tx_desc_s *tx_desc,
  1637. qdf_nbuf_t nbuf)
  1638. {
  1639. if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
  1640. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1641. nbuf,
  1642. QDF_DMA_TO_DEVICE,
  1643. nbuf->len);
  1644. else
  1645. return qdf_nbuf_map_single(vdev->osdev, nbuf,
  1646. QDF_DMA_TO_DEVICE);
  1647. }
  1648. #else
  1649. static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1650. struct dp_vdev *vdev,
  1651. struct dp_tx_desc_s *tx_desc)
  1652. {
  1653. }
  1654. static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
  1655. struct dp_tx_desc_s *tx_desc)
  1656. {
  1657. }
  1658. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1659. struct dp_tx_desc_s *tx_desc,
  1660. qdf_nbuf_t nbuf)
  1661. {
  1662. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1663. nbuf,
  1664. QDF_DMA_TO_DEVICE,
  1665. nbuf->len);
  1666. }
  1667. #endif
  1668. #ifdef MESH_MODE_SUPPORT
  1669. /**
  1670. * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
  1671. * @soc: datapath SOC
  1672. * @vdev: datapath vdev
  1673. * @tx_desc: TX descriptor
  1674. *
  1675. * Return: None
  1676. */
  1677. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1678. struct dp_vdev *vdev,
  1679. struct dp_tx_desc_s *tx_desc)
  1680. {
  1681. if (qdf_unlikely(vdev->mesh_vdev))
  1682. tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
  1683. }
  1684. /**
  1685. * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
  1686. * @soc: dp_soc handle
  1687. * @tx_desc: TX descriptor
  1689. *
  1690. * Return: None
  1691. */
  1692. static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1693. struct dp_tx_desc_s *tx_desc)
  1694. {
  1695. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1696. struct dp_vdev *vdev = NULL;
  1697. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
  1698. qdf_nbuf_free(nbuf);
  1699. DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
  1700. } else {
  1701. vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1702. DP_MOD_ID_MESH);
  1703. if (vdev && vdev->osif_tx_free_ext)
  1704. vdev->osif_tx_free_ext((nbuf));
  1705. else
  1706. qdf_nbuf_free(nbuf);
  1707. if (vdev)
  1708. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  1709. }
  1710. }
  1711. #else
  1712. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1713. struct dp_vdev *vdev,
  1714. struct dp_tx_desc_s *tx_desc)
  1715. {
  1716. }
  1717. static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1718. struct dp_tx_desc_s *tx_desc)
  1719. {
  1720. }
  1721. #endif
  1722. /**
  1723. * dp_tx_frame_is_drop() - checks if the packet is loopback
  1724. * @vdev: DP vdev handle
* @srcmac: source MAC address of the frame
* @dstmac: destination MAC address of the frame
  1726. *
  1727. * Return: 1 if frame needs to be dropped else 0
  1728. */
  1729. int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
  1730. {
  1731. struct dp_pdev *pdev = NULL;
  1732. struct dp_ast_entry *src_ast_entry = NULL;
  1733. struct dp_ast_entry *dst_ast_entry = NULL;
  1734. struct dp_soc *soc = NULL;
  1735. qdf_assert(vdev);
  1736. pdev = vdev->pdev;
  1737. qdf_assert(pdev);
  1738. soc = pdev->soc;
  1739. dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1740. (soc, dstmac, vdev->pdev->pdev_id);
  1741. src_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1742. (soc, srcmac, vdev->pdev->pdev_id);
  1743. if (dst_ast_entry && src_ast_entry) {
  1744. if (dst_ast_entry->peer_id ==
  1745. src_ast_entry->peer_id)
  1746. return 1;
  1747. }
  1748. return 0;
  1749. }
  1750. /**
  1751. * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
  1752. * @vdev: DP vdev handle
  1753. * @nbuf: skb
* @msdu_info: MSDU info that carries the TID, fw metadata and the Tx queue
  1757. * @peer_id: peer_id of the peer in case of NAWDS frames
  1758. * @tx_exc_metadata: Handle that holds exception path metadata
  1759. *
  1760. * Return: NULL on success,
  1761. * nbuf when it fails to send
  1762. */
  1763. qdf_nbuf_t
  1764. dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1765. struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
  1766. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1767. {
  1768. struct dp_pdev *pdev = vdev->pdev;
  1769. struct dp_soc *soc = pdev->soc;
  1770. struct dp_tx_desc_s *tx_desc;
  1771. QDF_STATUS status;
  1772. struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
  1773. uint16_t htt_tcl_metadata = 0;
  1774. enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
  1775. uint8_t tid = msdu_info->tid;
  1776. struct cdp_tid_tx_stats *tid_stats = NULL;
  1777. /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
  1778. tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
  1779. msdu_info, tx_exc_metadata);
  1780. if (!tx_desc) {
  1781. dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
  1782. vdev, tx_q->desc_pool_id);
  1783. drop_code = TX_DESC_ERR;
  1784. goto fail_return;
  1785. }
  1786. if (qdf_unlikely(soc->cce_disable)) {
  1787. if (dp_cce_classify(vdev, nbuf) == true) {
  1788. DP_STATS_INC(vdev, tx_i.cce_classified, 1);
  1789. tid = DP_VO_TID;
  1790. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1791. }
  1792. }
  1793. dp_tx_update_tdls_flags(soc, vdev, tx_desc);
  1794. if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
  1795. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1796. HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
  1797. } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
  1798. HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
  1799. HTT_TCL_METADATA_TYPE_PEER_BASED);
  1800. HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
  1801. peer_id);
  1802. } else
  1803. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1804. if (msdu_info->exception_fw)
  1805. HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  1806. dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
  1807. !pdev->enhanced_stats_en);
  1808. dp_tx_update_mesh_flags(soc, vdev, tx_desc);
  1809. if (qdf_unlikely(QDF_STATUS_SUCCESS !=
  1810. dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
  1811. /* Handle failure */
  1812. dp_err("qdf_nbuf_map failed");
  1813. DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
  1814. drop_code = TX_DMA_MAP_ERR;
  1815. goto release_desc;
  1816. }
  1817. tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
  1818. /* Enqueue the Tx MSDU descriptor to HW for transmit */
  1819. status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
  1820. tx_exc_metadata, msdu_info);
  1821. if (status != QDF_STATUS_SUCCESS) {
  1822. dp_tx_err("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  1823. tx_desc, tx_q->ring_id);
  1824. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  1825. QDF_DMA_TO_DEVICE,
  1826. nbuf->len);
  1827. drop_code = TX_HW_ENQUEUE;
  1828. goto release_desc;
  1829. }
  1830. return NULL;
  1831. release_desc:
  1832. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  1833. fail_return:
  1834. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1835. tid_stats = &pdev->stats.tid_stats.
  1836. tid_tx_stats[tx_q->ring_id][tid];
  1837. tid_stats->swdrop_cnt[drop_code]++;
  1838. return nbuf;
  1839. }
  1840. /**
  1841. * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
  1842. * @soc: Soc handle
  1843. * @desc: software Tx descriptor to be processed
  1844. *
  1845. * Return: none
  1846. */
  1847. static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
  1848. struct dp_tx_desc_s *desc)
  1849. {
  1850. qdf_nbuf_t nbuf = desc->nbuf;
  1851. /* nbuf already freed in vdev detach path */
  1852. if (!nbuf)
  1853. return;
  1854. /* If it is TDLS mgmt, don't unmap or free the frame */
  1855. if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
  1856. return dp_non_std_tx_comp_free_buff(soc, desc);
  1857. /* 0 : MSDU buffer, 1 : MLE */
  1858. if (desc->msdu_ext_desc) {
  1859. /* TSO free */
  1860. if (hal_tx_ext_desc_get_tso_enable(
  1861. desc->msdu_ext_desc->vaddr)) {
1862. /* unmap each TSO segment before freeing the nbuf */
  1863. dp_tx_tso_unmap_segment(soc, desc->tso_desc,
  1864. desc->tso_num_desc);
  1865. qdf_nbuf_free(nbuf);
  1866. return;
  1867. }
  1868. }
1869. /* If it's an ME frame, don't unmap the cloned nbufs */
  1870. if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
  1871. goto nbuf_free;
  1872. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
  1873. QDF_DMA_TO_DEVICE, nbuf->len);
  1874. if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
  1875. return dp_mesh_tx_comp_free_buff(soc, desc);
  1876. nbuf_free:
  1877. qdf_nbuf_free(nbuf);
  1878. }
  1879. /**
  1880. * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
  1881. * @vdev: DP vdev handle
  1882. * @nbuf: skb
  1883. * @msdu_info: MSDU info to be setup in MSDU extension descriptor
  1884. *
  1885. * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
  1886. *
  1887. * Return: NULL on success,
  1888. * nbuf when it fails to send
  1889. */
  1890. #if QDF_LOCK_STATS
  1891. noinline
  1892. #else
  1893. #endif
  1894. qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1895. struct dp_tx_msdu_info_s *msdu_info)
  1896. {
  1897. uint32_t i;
  1898. struct dp_pdev *pdev = vdev->pdev;
  1899. struct dp_soc *soc = pdev->soc;
  1900. struct dp_tx_desc_s *tx_desc;
  1901. bool is_cce_classified = false;
  1902. QDF_STATUS status;
  1903. uint16_t htt_tcl_metadata = 0;
  1904. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  1905. struct cdp_tid_tx_stats *tid_stats = NULL;
  1906. uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
  1907. if (qdf_unlikely(soc->cce_disable)) {
  1908. is_cce_classified = dp_cce_classify(vdev, nbuf);
  1909. if (is_cce_classified) {
  1910. DP_STATS_INC(vdev, tx_i.cce_classified, 1);
  1911. msdu_info->tid = DP_VO_TID;
  1912. }
  1913. }
  1914. if (msdu_info->frm_type == dp_tx_frm_me)
  1915. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  1916. i = 0;
  1917. /* Print statement to track i and num_seg */
  1918. /*
  1919. * For each segment (maps to 1 MSDU) , prepare software and hardware
  1920. * descriptors using information in msdu_info
  1921. */
  1922. while (i < msdu_info->num_seg) {
  1923. /*
  1924. * Setup Tx descriptor for an MSDU, and MSDU extension
  1925. * descriptor
  1926. */
  1927. tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
  1928. tx_q->desc_pool_id);
  1929. if (!tx_desc) {
  1930. if (msdu_info->frm_type == dp_tx_frm_me) {
  1931. prep_desc_fail++;
  1932. dp_tx_me_free_buf(pdev,
  1933. (void *)(msdu_info->u.sg_info
  1934. .curr_seg->frags[0].vaddr));
  1935. if (prep_desc_fail == msdu_info->num_seg) {
  1936. /*
  1937. * Unmap is needed only if descriptor
  1938. * preparation failed for all segments.
  1939. */
  1940. qdf_nbuf_unmap(soc->osdev,
  1941. msdu_info->u.sg_info.
  1942. curr_seg->nbuf,
  1943. QDF_DMA_TO_DEVICE);
  1944. }
  1945. /*
  1946. * Free the nbuf for the current segment
  1947. * and make it point to the next in the list.
1948. * For ME, there are as many segments as there
1949. * are clients.
  1950. */
  1951. qdf_nbuf_free(msdu_info->u.sg_info
  1952. .curr_seg->nbuf);
  1953. if (msdu_info->u.sg_info.curr_seg->next) {
  1954. msdu_info->u.sg_info.curr_seg =
  1955. msdu_info->u.sg_info
  1956. .curr_seg->next;
  1957. nbuf = msdu_info->u.sg_info
  1958. .curr_seg->nbuf;
  1959. }
  1960. i++;
  1961. continue;
  1962. }
  1963. if (msdu_info->frm_type == dp_tx_frm_tso) {
  1964. dp_tx_tso_unmap_segment(soc,
  1965. msdu_info->u.tso_info.
  1966. curr_seg,
  1967. msdu_info->u.tso_info.
  1968. tso_num_seg_list);
  1969. if (msdu_info->u.tso_info.curr_seg->next) {
  1970. msdu_info->u.tso_info.curr_seg =
  1971. msdu_info->u.tso_info.curr_seg->next;
  1972. i++;
  1973. continue;
  1974. }
  1975. }
  1976. goto done;
  1977. }
  1978. if (msdu_info->frm_type == dp_tx_frm_me) {
  1979. tx_desc->me_buffer =
  1980. msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
  1981. tx_desc->flags |= DP_TX_DESC_FLAG_ME;
  1982. }
  1983. if (is_cce_classified)
  1984. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1985. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1986. if (msdu_info->exception_fw) {
  1987. HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  1988. }
  1989. /*
  1990. * For frames with multiple segments (TSO, ME), jump to next
  1991. * segment.
  1992. */
  1993. if (msdu_info->frm_type == dp_tx_frm_tso) {
  1994. if (msdu_info->u.tso_info.curr_seg->next) {
  1995. msdu_info->u.tso_info.curr_seg =
  1996. msdu_info->u.tso_info.curr_seg->next;
  1997. /*
  1998. * If this is a jumbo nbuf, then increment the
  1999. * number of nbuf users for each additional
  2000. * segment of the msdu. This will ensure that
  2001. * the skb is freed only after receiving tx
  2002. * completion for all segments of an nbuf
  2003. */
  2004. qdf_nbuf_inc_users(nbuf);
  2005. /* Check with MCL if this is needed */
  2006. /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
  2007. */
  2008. }
  2009. }
  2010. /*
  2011. * Enqueue the Tx MSDU descriptor to HW for transmit
  2012. */
  2013. status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
  2014. NULL, msdu_info);
  2015. if (status != QDF_STATUS_SUCCESS) {
  2016. dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2017. tx_desc, tx_q->ring_id);
  2018. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2019. tid_stats = &pdev->stats.tid_stats.
  2020. tid_tx_stats[tx_q->ring_id][msdu_info->tid];
  2021. tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
  2022. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2023. if (msdu_info->frm_type == dp_tx_frm_me) {
  2024. hw_enq_fail++;
  2025. if (hw_enq_fail == msdu_info->num_seg) {
  2026. /*
  2027. * Unmap is needed only if enqueue
  2028. * failed for all segments.
  2029. */
  2030. qdf_nbuf_unmap(soc->osdev,
  2031. msdu_info->u.sg_info.
  2032. curr_seg->nbuf,
  2033. QDF_DMA_TO_DEVICE);
  2034. }
  2035. /*
  2036. * Free the nbuf for the current segment
  2037. * and make it point to the next in the list.
2038. * For ME, there are as many segments as there
2039. * are clients.
  2040. */
  2041. qdf_nbuf_free(msdu_info->u.sg_info
  2042. .curr_seg->nbuf);
  2043. if (msdu_info->u.sg_info.curr_seg->next) {
  2044. msdu_info->u.sg_info.curr_seg =
  2045. msdu_info->u.sg_info
  2046. .curr_seg->next;
  2047. nbuf = msdu_info->u.sg_info
  2048. .curr_seg->nbuf;
  2049. } else
  2050. break;
  2051. i++;
  2052. continue;
  2053. }
  2054. /*
  2055. * For TSO frames, the nbuf users increment done for
  2056. * the current segment has to be reverted, since the
  2057. * hw enqueue for this segment failed
  2058. */
  2059. if (msdu_info->frm_type == dp_tx_frm_tso &&
  2060. msdu_info->u.tso_info.curr_seg) {
  2061. /*
  2062. * unmap and free current,
  2063. * retransmit remaining segments
  2064. */
  2065. dp_tx_comp_free_buf(soc, tx_desc);
  2066. i++;
  2067. continue;
  2068. }
  2069. goto done;
  2070. }
  2071. /*
  2072. * TODO
  2073. * if tso_info structure can be modified to have curr_seg
  2074. * as first element, following 2 blocks of code (for TSO and SG)
  2075. * can be combined into 1
  2076. */
  2077. /*
  2078. * For Multicast-Unicast converted packets,
  2079. * each converted frame (for a client) is represented as
  2080. * 1 segment
  2081. */
  2082. if ((msdu_info->frm_type == dp_tx_frm_sg) ||
  2083. (msdu_info->frm_type == dp_tx_frm_me)) {
  2084. if (msdu_info->u.sg_info.curr_seg->next) {
  2085. msdu_info->u.sg_info.curr_seg =
  2086. msdu_info->u.sg_info.curr_seg->next;
  2087. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2088. } else
  2089. break;
  2090. }
  2091. i++;
  2092. }
  2093. nbuf = NULL;
  2094. done:
  2095. return nbuf;
  2096. }
  2097. /**
  2098. * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
  2099. * for SG frames
  2100. * @vdev: DP vdev handle
  2101. * @nbuf: skb
  2102. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2103. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2104. *
2105. * Return: nbuf on success,
2106. * NULL on failure
  2107. */
  2108. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2109. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2110. {
  2111. uint32_t cur_frag, nr_frags, i;
  2112. qdf_dma_addr_t paddr;
  2113. struct dp_tx_sg_info_s *sg_info;
  2114. sg_info = &msdu_info->u.sg_info;
  2115. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2116. if (QDF_STATUS_SUCCESS !=
  2117. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2118. QDF_DMA_TO_DEVICE,
  2119. qdf_nbuf_headlen(nbuf))) {
  2120. dp_tx_err("dma map error");
  2121. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2122. qdf_nbuf_free(nbuf);
  2123. return NULL;
  2124. }
  2125. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2126. seg_info->frags[0].paddr_lo = paddr;
  2127. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2128. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2129. seg_info->frags[0].vaddr = (void *) nbuf;
  2130. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2131. if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
  2132. nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
  2133. dp_tx_err("frag dma map error");
  2134. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2135. goto map_err;
  2136. }
  2137. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2138. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2139. seg_info->frags[cur_frag + 1].paddr_hi =
  2140. ((uint64_t) paddr) >> 32;
  2141. seg_info->frags[cur_frag + 1].len =
  2142. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2143. }
  2144. seg_info->frag_cnt = (cur_frag + 1);
  2145. seg_info->total_len = qdf_nbuf_len(nbuf);
  2146. seg_info->next = NULL;
  2147. sg_info->curr_seg = seg_info;
  2148. msdu_info->frm_type = dp_tx_frm_sg;
  2149. msdu_info->num_seg = 1;
  2150. return nbuf;
  2151. map_err:
  2152. /* restore paddr into nbuf before calling unmap */
  2153. qdf_nbuf_mapped_paddr_set(nbuf,
  2154. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2155. ((uint64_t)
  2156. seg_info->frags[0].paddr_hi) << 32));
  2157. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2158. QDF_DMA_TO_DEVICE,
  2159. seg_info->frags[0].len);
  2160. for (i = 1; i <= cur_frag; i++) {
  2161. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2162. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2163. seg_info->frags[i].paddr_hi) << 32),
  2164. seg_info->frags[i].len,
  2165. QDF_DMA_TO_DEVICE);
  2166. }
  2167. qdf_nbuf_free(nbuf);
  2168. return NULL;
  2169. }
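/*
 * Resulting fragment layout (from the code above): frags[0] describes
 * the linear head of the nbuf, and frags[1..nr_frags] describe the
 * mapped page fragments, each with its DMA address split into
 * paddr_lo/paddr_hi plus its length.
 */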
  2170. /**
  2171. * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
  2172. * @vdev: DP vdev handle
  2173. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2174. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2175. *
2176. * Return: None
  2178. */
  2179. static
  2180. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2181. struct dp_tx_msdu_info_s *msdu_info,
  2182. uint16_t ppdu_cookie)
  2183. {
  2184. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2185. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2186. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2187. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2188. (msdu_info->meta_data[5], 1);
  2189. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2190. (msdu_info->meta_data[5], 1);
  2191. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2192. (msdu_info->meta_data[6], ppdu_cookie);
  2193. msdu_info->exception_fw = 1;
  2194. msdu_info->is_tx_sniffer = 1;
  2195. }
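/*
 * Illustrative caller sketch (mirrors the exception-path usage further
 * below): the PPDU cookie supplied by the caller is echoed back in the
 * PPDU completions:
 *
 *     if (tx_exc_metadata->is_tx_sniffer)
 *             dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
 *                                            tx_exc_metadata->ppdu_cookie);
 */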
  2196. #ifdef MESH_MODE_SUPPORT
  2197. /**
2198. * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2199. * and prepare msdu_info for mesh frames
  2200. * @vdev: DP vdev handle
  2201. * @nbuf: skb
  2202. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2203. *
  2204. * Return: NULL on failure,
  2205. * nbuf when extracted successfully
  2206. */
  2207. static
  2208. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2209. struct dp_tx_msdu_info_s *msdu_info)
  2210. {
  2211. struct meta_hdr_s *mhdr;
  2212. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2213. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2214. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2215. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2216. msdu_info->exception_fw = 0;
  2217. goto remove_meta_hdr;
  2218. }
  2219. msdu_info->exception_fw = 1;
  2220. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2221. meta_data->host_tx_desc_pool = 1;
  2222. meta_data->update_peer_cache = 1;
  2223. meta_data->learning_frame = 1;
  2224. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2225. meta_data->power = mhdr->power;
  2226. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2227. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2228. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2229. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2230. meta_data->dyn_bw = 1;
  2231. meta_data->valid_pwr = 1;
  2232. meta_data->valid_mcs_mask = 1;
  2233. meta_data->valid_nss_mask = 1;
  2234. meta_data->valid_preamble_type = 1;
  2235. meta_data->valid_retries = 1;
  2236. meta_data->valid_bw_info = 1;
  2237. }
  2238. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2239. meta_data->encrypt_type = 0;
  2240. meta_data->valid_encrypt_type = 1;
  2241. meta_data->learning_frame = 0;
  2242. }
  2243. meta_data->valid_key_flags = 1;
  2244. meta_data->key_flags = (mhdr->keyix & 0x3);
  2245. remove_meta_hdr:
  2246. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2247. dp_tx_err("qdf_nbuf_pull_head failed");
  2248. qdf_nbuf_free(nbuf);
  2249. return NULL;
  2250. }
  2251. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2252. dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
  2253. " tid %d to_fw %d",
  2254. msdu_info->meta_data[0],
  2255. msdu_info->meta_data[1],
  2256. msdu_info->meta_data[2],
  2257. msdu_info->meta_data[3],
  2258. msdu_info->meta_data[4],
  2259. msdu_info->meta_data[5],
  2260. msdu_info->tid, msdu_info->exception_fw);
  2261. return nbuf;
  2262. }
  2263. #else
  2264. static
  2265. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2266. struct dp_tx_msdu_info_s *msdu_info)
  2267. {
  2268. return nbuf;
  2269. }
  2270. #endif
  2271. /**
  2272. * dp_check_exc_metadata() - Checks if parameters are valid
2273. * @tx_exc: holds all exception path parameters
2274. *
2275. * Return: true when all the parameters are valid, else false
  2276. *
  2277. */
  2278. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2279. {
  2280. bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
  2281. HTT_INVALID_TID);
  2282. bool invalid_encap_type =
  2283. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2284. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2285. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2286. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2287. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2288. tx_exc->ppdu_cookie == 0);
  2289. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2290. invalid_cookie) {
  2291. return false;
  2292. }
  2293. return true;
  2294. }
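/*
 * A minimal sketch of metadata that passes the checks above (the field
 * values are illustrative; the "invalid" sentinels shown are the ones
 * the function explicitly allows through):
 *
 *     struct cdp_tx_exception_metadata meta = {0};
 *
 *     meta.tid = HTT_INVALID_TID;                     // or a valid TID
 *     meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *     meta.sec_type = CDP_INVALID_SEC_TYPE;
 *     meta.is_tx_sniffer = 0;     // if 1, ppdu_cookie must be non-zero
 */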
  2295. #ifdef ATH_SUPPORT_IQUE
  2296. /**
  2297. * dp_tx_mcast_enhance() - Multicast enhancement on TX
  2298. * @vdev: vdev handle
  2299. * @nbuf: skb
  2300. *
  2301. * Return: true on success,
  2302. * false on failure
  2303. */
  2304. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2305. {
  2306. qdf_ether_header_t *eh;
2307. /* Mcast to Ucast conversion */
  2308. if (qdf_likely(!vdev->mcast_enhancement_en))
  2309. return true;
  2310. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2311. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2312. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2313. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2314. qdf_nbuf_set_next(nbuf, NULL);
  2315. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2316. qdf_nbuf_len(nbuf));
  2317. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2318. QDF_STATUS_SUCCESS) {
  2319. return false;
  2320. }
  2321. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2322. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2323. QDF_STATUS_SUCCESS) {
  2324. return false;
  2325. }
  2326. }
  2327. }
  2328. return true;
  2329. }
  2330. #else
  2331. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2332. {
  2333. return true;
  2334. }
  2335. #endif
  2336. /**
  2337. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2338. * @nbuf: qdf_nbuf_t
  2339. * @vdev: struct dp_vdev *
  2340. *
2341. * Allow the packet for processing only if it is destined to a peer
2342. * client connected to the same vap. Drop the packet if the client is
2343. * connected to a different vap.
  2344. *
  2345. * Return: QDF_STATUS
  2346. */
  2347. static inline QDF_STATUS
  2348. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2349. {
  2350. struct dp_ast_entry *dst_ast_entry = NULL;
  2351. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2352. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2353. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2354. return QDF_STATUS_SUCCESS;
  2355. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
  2356. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2357. eh->ether_dhost,
  2358. vdev->vdev_id);
  2359. /* If there is no ast entry, return failure */
  2360. if (qdf_unlikely(!dst_ast_entry)) {
  2361. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2362. return QDF_STATUS_E_FAILURE;
  2363. }
  2364. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2365. return QDF_STATUS_SUCCESS;
  2366. }
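/*
 * Note: the AST lookup above is keyed by (destination MAC, vdev_id), so
 * a unicast destination associated with a different vap yields no entry
 * and the frame is rejected; multicast/broadcast frames are allowed
 * through without the lookup.
 */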
  2367. /**
  2368. * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  2369. * @soc: DP soc handle
  2370. * @vdev_id: id of DP vdev handle
  2371. * @nbuf: skb
  2372. * @tx_exc_metadata: Handle that holds exception path meta data
  2373. *
  2374. * Entry point for Core Tx layer (DP_TX) invoked from
  2375. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2376. *
  2377. * Return: NULL on success,
  2378. * nbuf when it fails to send
  2379. */
  2380. qdf_nbuf_t
  2381. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2382. qdf_nbuf_t nbuf,
  2383. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2384. {
  2385. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2386. qdf_ether_header_t *eh = NULL;
  2387. struct dp_tx_msdu_info_s msdu_info;
  2388. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2389. DP_MOD_ID_TX_EXCEPTION);
  2390. if (qdf_unlikely(!vdev))
  2391. goto fail;
  2392. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2393. if (!tx_exc_metadata)
  2394. goto fail;
  2395. msdu_info.tid = tx_exc_metadata->tid;
  2396. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2397. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2398. QDF_MAC_ADDR_REF(nbuf->data));
  2399. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2400. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2401. dp_tx_err("Invalid parameters in exception path");
  2402. goto fail;
  2403. }
  2404. /* Basic sanity checks for unsupported packets */
  2405. /* MESH mode */
  2406. if (qdf_unlikely(vdev->mesh_vdev)) {
  2407. dp_tx_err("Mesh mode is not supported in exception path");
  2408. goto fail;
  2409. }
  2410. /*
  2411. * Classify the frame and call corresponding
  2412. * "prepare" function which extracts the segment (TSO)
  2413. * and fragmentation information (for TSO , SG, ME, or Raw)
  2414. * into MSDU_INFO structure which is later used to fill
  2415. * SW and HW descriptors.
  2416. */
  2417. if (qdf_nbuf_is_tso(nbuf)) {
  2418. dp_verbose_debug("TSO frame %pK", vdev);
  2419. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2420. qdf_nbuf_len(nbuf));
  2421. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2422. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2423. qdf_nbuf_len(nbuf));
  2424. goto fail;
  2425. }
  2426. goto send_multiple;
  2427. }
  2428. /* SG */
  2429. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2430. struct dp_tx_seg_info_s seg_info = {0};
  2431. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2432. if (!nbuf)
  2433. goto fail;
  2434. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2435. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2436. qdf_nbuf_len(nbuf));
  2437. goto send_multiple;
  2438. }
2439. if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
  2440. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2441. qdf_nbuf_len(nbuf));
  2442. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2443. tx_exc_metadata->ppdu_cookie);
  2444. }
  2445. /*
  2446. * Get HW Queue to use for this frame.
2447. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2448. * dedicated for data and 1 for command.
  2449. * "queue_id" maps to one hardware ring.
  2450. * With each ring, we also associate a unique Tx descriptor pool
  2451. * to minimize lock contention for these resources.
  2452. */
  2453. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2454. /*
  2455. * Check exception descriptors
  2456. */
  2457. if (dp_tx_exception_limit_check(vdev))
  2458. goto fail;
  2459. /* Single linear frame */
  2460. /*
  2461. * If nbuf is a simple linear frame, use send_single function to
  2462. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2463. * SRNG. There is no need to setup a MSDU extension descriptor.
  2464. */
  2465. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2466. tx_exc_metadata->peer_id, tx_exc_metadata);
  2467. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2468. return nbuf;
  2469. send_multiple:
  2470. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2471. fail:
  2472. if (vdev)
  2473. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2474. dp_verbose_debug("pkt send failed");
  2475. return nbuf;
  2476. }
  2477. /**
  2478. * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2479. * in the exception path, as a special case, to avoid the regular exception path check.
  2480. * @soc: DP soc handle
  2481. * @vdev_id: id of DP vdev handle
  2482. * @nbuf: skb
  2483. * @tx_exc_metadata: Handle that holds exception path meta data
  2484. *
  2485. * Entry point for Core Tx layer (DP_TX) invoked from
  2486. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2487. *
  2488. * Return: NULL on success,
  2489. * nbuf when it fails to send
  2490. */
  2491. qdf_nbuf_t
  2492. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2493. uint8_t vdev_id, qdf_nbuf_t nbuf,
  2494. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2495. {
  2496. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2497. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2498. DP_MOD_ID_TX_EXCEPTION);
  2499. if (qdf_unlikely(!vdev))
  2500. goto fail;
  2501. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2502. == QDF_STATUS_E_FAILURE)) {
  2503. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2504. goto fail;
  2505. }
2506. /* Drop the vdev ref here, as it will be taken again inside dp_tx_send_exception */
  2507. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2508. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  2509. fail:
  2510. if (vdev)
  2511. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2512. dp_verbose_debug("pkt send failed");
  2513. return nbuf;
  2514. }
  2515. /**
  2516. * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  2517. * @soc: DP soc handle
  2518. * @vdev_id: DP vdev handle
  2519. * @nbuf: skb
  2520. *
  2521. * Entry point for Core Tx layer (DP_TX) invoked from
  2522. * hard_start_xmit in OSIF/HDD
  2523. *
  2524. * Return: NULL on success,
  2525. * nbuf when it fails to send
  2526. */
  2527. #ifdef MESH_MODE_SUPPORT
  2528. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2529. qdf_nbuf_t nbuf)
  2530. {
  2531. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2532. struct meta_hdr_s *mhdr;
  2533. qdf_nbuf_t nbuf_mesh = NULL;
  2534. qdf_nbuf_t nbuf_clone = NULL;
  2535. struct dp_vdev *vdev;
  2536. uint8_t no_enc_frame = 0;
  2537. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  2538. if (!nbuf_mesh) {
  2539. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2540. "qdf_nbuf_unshare failed");
  2541. return nbuf;
  2542. }
  2543. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  2544. if (!vdev) {
  2545. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2546. "vdev is NULL for vdev_id %d", vdev_id);
  2547. return nbuf;
  2548. }
  2549. nbuf = nbuf_mesh;
  2550. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2551. if ((vdev->sec_type != cdp_sec_type_none) &&
  2552. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  2553. no_enc_frame = 1;
  2554. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  2555. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  2556. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  2557. !no_enc_frame) {
  2558. nbuf_clone = qdf_nbuf_clone(nbuf);
  2559. if (!nbuf_clone) {
  2560. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2561. "qdf_nbuf_clone failed");
  2562. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2563. return nbuf;
  2564. }
  2565. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  2566. }
  2567. if (nbuf_clone) {
  2568. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  2569. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2570. } else {
  2571. qdf_nbuf_free(nbuf_clone);
  2572. }
  2573. }
  2574. if (no_enc_frame)
  2575. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  2576. else
  2577. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  2578. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  2579. if ((!nbuf) && no_enc_frame) {
  2580. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2581. }
  2582. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2583. return nbuf;
  2584. }
  2585. #else
  2586. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
  2587. qdf_nbuf_t nbuf)
  2588. {
  2589. return dp_tx_send(soc, vdev_id, nbuf);
  2590. }
  2591. #endif
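/*
 * Example (illustrative sketch, not part of this driver): how a mesh-aware
 * OSIF caller might prepend the meta header consumed by dp_tx_send_mesh().
 * The helper name is hypothetical; struct meta_hdr_s and the METAHDR_*
 * flags are the ones used above.
 */
static inline qdf_nbuf_t
osif_mesh_tx_example(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		     qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;

	if (!qdf_nbuf_push_head(nbuf, sizeof(*mhdr)))
		return nbuf; /* headroom too small; caller keeps the frame */

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
	qdf_mem_zero(mhdr, sizeof(*mhdr));
	/* ask for non-QoS queuing; mapped to the non-QoS mcast/bcast TID */
	mhdr->flags = METAHDR_FLAG_NOQOS;

	/* NULL return means the datapath consumed the frame */
	return dp_tx_send_mesh(soc_hdl, vdev_id, nbuf);
}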
  2592. /**
  2593. * dp_tx_nawds_handler() - NAWDS handler
  2594. *
  2595. * @soc: DP soc handle
2596. * @vdev: DP vdev handle
  2597. * @msdu_info: msdu_info required to create HTT metadata
  2598. * @nbuf: skb
  2599. *
2600. * This API clones the multicast frame and transmits it to each
2601. * NAWDS-enabled peer on the vdev, tagged with that peer's peer id.
2602. * Return: none
  2603. */
  2604. static inline
  2605. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2606. struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
  2607. {
  2608. struct dp_peer *peer = NULL;
  2609. qdf_nbuf_t nbuf_clone = NULL;
  2610. uint16_t peer_id = DP_INVALID_PEER;
  2611. uint16_t sa_peer_id = DP_INVALID_PEER;
  2612. struct dp_ast_entry *ast_entry = NULL;
  2613. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2614. qdf_spin_lock_bh(&soc->ast_lock);
  2615. ast_entry = dp_peer_ast_hash_find_by_pdevid
  2616. (soc,
  2617. (uint8_t *)(eh->ether_shost),
  2618. vdev->pdev->pdev_id);
  2619. if (ast_entry)
  2620. sa_peer_id = ast_entry->peer_id;
  2621. qdf_spin_unlock_bh(&soc->ast_lock);
  2622. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2623. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2624. if (!peer->bss_peer && peer->nawds_enabled) {
  2625. peer_id = peer->peer_id;
2626. /* Multicast packets need to be
2627. * dropped in case of intra-BSS forwarding
  2628. */
  2629. if (sa_peer_id == peer->peer_id) {
  2630. QDF_TRACE(QDF_MODULE_ID_DP,
  2631. QDF_TRACE_LEVEL_DEBUG,
  2632. " %s: multicast packet", __func__);
  2633. DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
  2634. continue;
  2635. }
  2636. nbuf_clone = qdf_nbuf_clone(nbuf);
  2637. if (!nbuf_clone) {
  2638. QDF_TRACE(QDF_MODULE_ID_DP,
  2639. QDF_TRACE_LEVEL_ERROR,
  2640. FL("nbuf clone failed"));
  2641. break;
  2642. }
  2643. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2644. msdu_info, peer_id,
  2645. NULL);
  2646. if (nbuf_clone) {
  2647. QDF_TRACE(QDF_MODULE_ID_DP,
  2648. QDF_TRACE_LEVEL_DEBUG,
  2649. FL("pkt send failed"));
  2650. qdf_nbuf_free(nbuf_clone);
  2651. } else {
  2652. if (peer_id != DP_INVALID_PEER)
  2653. DP_STATS_INC_PKT(peer, tx.nawds_mcast,
  2654. 1, qdf_nbuf_len(nbuf));
  2655. }
  2656. }
  2657. }
  2658. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2659. }
  2660. /**
  2661. * dp_tx_send() - Transmit a frame on a given VAP
2662. * @soc_hdl: DP soc handle
  2663. * @vdev_id: id of DP vdev handle
  2664. * @nbuf: skb
  2665. *
  2666. * Entry point for Core Tx layer (DP_TX) invoked from
  2667. * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
  2668. * cases
  2669. *
  2670. * Return: NULL on success,
  2671. * nbuf when it fails to send
  2672. */
  2673. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2674. qdf_nbuf_t nbuf)
  2675. {
  2676. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2677. uint16_t peer_id = HTT_INVALID_PEER;
  2678. /*
2679. * A full memzero would add function-call overhead, so rely on
2680. * static initialization to clear the structure on the stack
  2681. */
  2682. struct dp_tx_msdu_info_s msdu_info = {0};
  2683. struct dp_vdev *vdev = NULL;
  2684. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  2685. return nbuf;
  2686. /*
2687. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2688. * it in the per-packet path.
2689. *
2690. * In this path the vdev memory is already protected by the
2691. * netdev tx lock
  2692. */
  2693. vdev = soc->vdev_id_map[vdev_id];
  2694. if (qdf_unlikely(!vdev))
  2695. return nbuf;
  2696. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2697. QDF_MAC_ADDR_REF(nbuf->data));
  2698. /*
  2699. * Set Default Host TID value to invalid TID
  2700. * (TID override disabled)
  2701. */
  2702. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  2703. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2704. if (qdf_unlikely(vdev->mesh_vdev)) {
  2705. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  2706. &msdu_info);
  2707. if (!nbuf_mesh) {
  2708. dp_verbose_debug("Extracting mesh metadata failed");
  2709. return nbuf;
  2710. }
  2711. nbuf = nbuf_mesh;
  2712. }
  2713. /*
  2714. * Get HW Queue to use for this frame.
2715. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2716. * dedicated for data and 1 for command.
  2717. * "queue_id" maps to one hardware ring.
  2718. * With each ring, we also associate a unique Tx descriptor pool
  2719. * to minimize lock contention for these resources.
  2720. */
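/*
 * Illustrative sketch (assumption, not the exact dp_tx_get_queue()
 * internals): a typical mapping hashes the current CPU onto one of the
 * TCL data rings and reuses that index for the Tx descriptor pool:
 *
 *   queue->ring_id = qdf_get_cpu() % wlan_cfg_num_tcl_data_rings(cfg);
 *   queue->desc_pool_id = queue->ring_id;
 */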
  2721. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2722. /*
  2723. * TCL H/W supports 2 DSCP-TID mapping tables.
  2724. * Table 1 - Default DSCP-TID mapping table
  2725. * Table 2 - 1 DSCP-TID override table
  2726. *
  2727. * If we need a different DSCP-TID mapping for this vap,
  2728. * call tid_classify to extract DSCP/ToS from frame and
  2729. * map to a TID and store in msdu_info. This is later used
  2730. * to fill in TCL Input descriptor (per-packet TID override).
  2731. */
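/*
 * Illustrative sketch (assumption): DSCP-based classification typically
 * reads the IP ToS byte and indexes a per-pdev DSCP-TID map; the names
 * below are for illustration only:
 *
 *   dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *   msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][dscp];
 */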
  2732. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
  2733. /*
  2734. * Classify the frame and call corresponding
  2735. * "prepare" function which extracts the segment (TSO)
2736. * and fragmentation information (for TSO, SG, ME, or Raw)
  2737. * into MSDU_INFO structure which is later used to fill
  2738. * SW and HW descriptors.
  2739. */
  2740. if (qdf_nbuf_is_tso(nbuf)) {
  2741. dp_verbose_debug("TSO frame %pK", vdev);
  2742. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2743. qdf_nbuf_len(nbuf));
  2744. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2745. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2746. qdf_nbuf_len(nbuf));
  2747. return nbuf;
  2748. }
  2749. goto send_multiple;
  2750. }
  2751. /* SG */
  2752. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2753. struct dp_tx_seg_info_s seg_info = {0};
  2754. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2755. if (!nbuf)
  2756. return NULL;
  2757. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2758. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2759. qdf_nbuf_len(nbuf));
  2760. goto send_multiple;
  2761. }
  2762. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  2763. return NULL;
  2764. /* RAW */
  2765. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  2766. struct dp_tx_seg_info_s seg_info = {0};
  2767. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  2768. if (!nbuf)
  2769. return NULL;
  2770. dp_verbose_debug("Raw frame %pK", vdev);
  2771. goto send_multiple;
  2772. }
  2773. if (qdf_unlikely(vdev->nawds_enabled)) {
  2774. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  2775. qdf_nbuf_data(nbuf);
2776. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
2777. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
2778. peer_id = DP_INVALID_PEER;
2779. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
2780. 1, qdf_nbuf_len(nbuf));
}
  2781. }
  2782. /* Single linear frame */
  2783. /*
  2784. * If nbuf is a simple linear frame, use send_single function to
  2785. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2786. * SRNG. There is no need to setup a MSDU extension descriptor.
  2787. */
  2788. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
  2789. return nbuf;
  2790. send_multiple:
  2791. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2792. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  2793. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  2794. return nbuf;
  2795. }
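/*
 * Example (illustrative sketch, not part of this driver): a minimal
 * hard_start_xmit-style caller. dp_tx_send() returns NULL once it has
 * consumed the nbuf; on a non-NULL return the caller still owns the
 * buffer and must free it. The wrapper name is hypothetical.
 */
static inline void osif_xmit_example(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
	if (nbuf)
		qdf_nbuf_free(nbuf); /* send failed; drop the frame */
}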
  2796. /**
2797. * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special
2798. * case, to avoid the per-packet vdev id check in the regular path.
2799. * @soc_hdl: DP soc handle
  2800. * @vdev_id: id of DP vdev handle
  2801. * @nbuf: skb
  2802. *
  2803. * Entry point for Core Tx layer (DP_TX) invoked from
  2804. * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
  2805. * with special condition to avoid per pkt check in dp_tx_send
  2806. *
  2807. * Return: NULL on success,
  2808. * nbuf when it fails to send
  2809. */
  2810. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2811. uint8_t vdev_id, qdf_nbuf_t nbuf)
  2812. {
  2813. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2814. struct dp_vdev *vdev = NULL;
  2815. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  2816. return nbuf;
  2817. /*
2818. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2819. * it in the per-packet path.
2820. *
2821. * In this path the vdev memory is already protected by the
2822. * netdev tx lock
  2823. */
  2824. vdev = soc->vdev_id_map[vdev_id];
  2825. if (qdf_unlikely(!vdev))
  2826. return nbuf;
  2827. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2828. == QDF_STATUS_E_FAILURE)) {
  2829. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2830. return nbuf;
  2831. }
  2832. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  2833. }
  2834. /**
  2835. * dp_tx_reinject_handler() - Tx Reinject Handler
  2836. * @soc: datapath soc handle
  2837. * @vdev: datapath vdev handle
  2838. * @tx_desc: software descriptor head pointer
  2839. * @status : Tx completion status from HTT descriptor
  2840. *
  2841. * This function reinjects frames back to Target.
  2842. * Todo - Host queue needs to be added
  2843. *
  2844. * Return: none
  2845. */
  2846. static
  2847. void dp_tx_reinject_handler(struct dp_soc *soc,
  2848. struct dp_vdev *vdev,
  2849. struct dp_tx_desc_s *tx_desc,
  2850. uint8_t *status)
  2851. {
  2852. struct dp_peer *peer = NULL;
  2853. uint32_t peer_id = HTT_INVALID_PEER;
  2854. qdf_nbuf_t nbuf = tx_desc->nbuf;
  2855. qdf_nbuf_t nbuf_copy = NULL;
  2856. struct dp_tx_msdu_info_s msdu_info;
  2857. #ifdef WDS_VENDOR_EXTENSION
  2858. int is_mcast = 0, is_ucast = 0;
  2859. int num_peers_3addr = 0;
  2860. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  2861. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  2862. #endif
  2863. qdf_assert(vdev);
  2864. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2865. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2866. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2867. "%s Tx reinject path", __func__);
  2868. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  2869. qdf_nbuf_len(tx_desc->nbuf));
  2870. #ifdef WDS_VENDOR_EXTENSION
  2871. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  2872. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  2873. } else {
  2874. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  2875. }
  2876. is_ucast = !is_mcast;
  2877. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2878. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2879. if (peer->bss_peer)
  2880. continue;
  2881. /* Detect wds peers that use 3-addr framing for mcast.
2882. * If there are any, the bss_peer is used to send the
2883. * mcast frame using 3-addr format. All wds-enabled
  2884. * peers that use 4-addr framing for mcast frames will
  2885. * be duplicated and sent as 4-addr frames below.
  2886. */
  2887. if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
  2888. num_peers_3addr = 1;
  2889. break;
  2890. }
  2891. }
  2892. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2893. #endif
  2894. if (qdf_unlikely(vdev->mesh_vdev)) {
  2895. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  2896. } else {
  2897. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2898. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2899. if ((peer->peer_id != HTT_INVALID_PEER) &&
  2900. #ifdef WDS_VENDOR_EXTENSION
  2901. /*
  2902. * . if 3-addr STA, then send on BSS Peer
  2903. * . if Peer WDS enabled and accept 4-addr mcast,
  2904. * send mcast on that peer only
  2905. * . if Peer WDS enabled and accept 4-addr ucast,
  2906. * send ucast on that peer only
  2907. */
  2908. ((peer->bss_peer && num_peers_3addr && is_mcast) ||
  2909. (peer->wds_enabled &&
  2910. ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
  2911. (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
  2912. #else
  2913. ((peer->bss_peer &&
  2914. !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
  2915. #endif
  2916. peer_id = DP_INVALID_PEER;
  2917. nbuf_copy = qdf_nbuf_copy(nbuf);
  2918. if (!nbuf_copy) {
  2919. QDF_TRACE(QDF_MODULE_ID_DP,
  2920. QDF_TRACE_LEVEL_DEBUG,
  2921. FL("nbuf copy failed"));
  2922. break;
  2923. }
  2924. nbuf_copy = dp_tx_send_msdu_single(vdev,
  2925. nbuf_copy,
  2926. &msdu_info,
  2927. peer_id,
  2928. NULL);
  2929. if (nbuf_copy) {
  2930. QDF_TRACE(QDF_MODULE_ID_DP,
  2931. QDF_TRACE_LEVEL_DEBUG,
  2932. FL("pkt send failed"));
  2933. qdf_nbuf_free(nbuf_copy);
  2934. }
  2935. }
  2936. }
  2937. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2938. }
  2939. qdf_nbuf_free(nbuf);
  2940. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  2941. }
  2942. /**
  2943. * dp_tx_inspect_handler() - Tx Inspect Handler
  2944. * @soc: datapath soc handle
  2945. * @vdev: datapath vdev handle
  2946. * @tx_desc: software descriptor head pointer
  2947. * @status : Tx completion status from HTT descriptor
  2948. *
  2949. * Handles Tx frames sent back to Host for inspection
  2950. * (ProxyARP)
  2951. *
  2952. * Return: none
  2953. */
  2954. static void dp_tx_inspect_handler(struct dp_soc *soc,
  2955. struct dp_vdev *vdev,
  2956. struct dp_tx_desc_s *tx_desc,
  2957. uint8_t *status)
  2958. {
  2959. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2960. "%s Tx inspect path",
  2961. __func__);
  2962. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  2963. qdf_nbuf_len(tx_desc->nbuf));
  2964. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  2965. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  2966. }
  2967. #ifdef MESH_MODE_SUPPORT
  2968. /**
  2969. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  2970. * in mesh meta header
  2971. * @tx_desc: software descriptor head pointer
  2972. * @ts: pointer to tx completion stats
  2973. * Return: none
  2974. */
  2975. static
  2976. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  2977. struct hal_tx_completion_status *ts)
  2978. {
  2979. struct meta_hdr_s *mhdr;
  2980. qdf_nbuf_t netbuf = tx_desc->nbuf;
  2981. if (!tx_desc->msdu_ext_desc) {
  2982. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  2983. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2984. "netbuf %pK offset %d",
  2985. netbuf, tx_desc->pkt_offset);
  2986. return;
  2987. }
  2988. }
  2989. if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2990. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2991. "netbuf %pK offset %zu", netbuf,
  2992. sizeof(struct meta_hdr_s));
  2993. return;
  2994. }
  2995. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
  2996. mhdr->rssi = ts->ack_frame_rssi;
  2997. mhdr->band = tx_desc->pdev->operating_channel.band;
  2998. mhdr->channel = tx_desc->pdev->operating_channel.num;
  2999. }
  3000. #else
  3001. static
  3002. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3003. struct hal_tx_completion_status *ts)
  3004. {
  3005. }
  3006. #endif
  3007. #ifdef QCA_PEER_EXT_STATS
3008. /**
  3009. * dp_tx_compute_tid_delay() - Compute per TID delay
  3010. * @stats: Per TID delay stats
  3011. * @tx_desc: Software Tx descriptor
  3012. *
  3013. * Compute the software enqueue and hw enqueue delays and
  3014. * update the respective histograms
  3015. *
  3016. * Return: void
  3017. */
  3018. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3019. struct dp_tx_desc_s *tx_desc)
  3020. {
  3021. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3022. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3023. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3024. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3025. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3026. timestamp_hw_enqueue = tx_desc->timestamp;
  3027. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3028. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3029. timestamp_hw_enqueue);
  3030. /*
3031. * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
  3032. */
  3033. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3034. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3035. }
3036. /**
  3037. * dp_tx_update_peer_ext_stats() - Update the peer extended stats
  3038. * @peer: DP peer context
  3039. * @tx_desc: Tx software descriptor
  3040. * @tid: Transmission ID
3041. * @ring_id: CPU context ID of the Tx completion ring
  3042. *
3043. * Update the peer extended stats. These are delay stats
3044. * maintained in addition to the regular stats, at per-MSDU level.
  3045. *
  3046. * Return: void
  3047. */
  3048. static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  3049. struct dp_tx_desc_s *tx_desc,
  3050. uint8_t tid, uint8_t ring_id)
  3051. {
  3052. struct dp_pdev *pdev = peer->vdev->pdev;
  3053. struct dp_soc *soc = NULL;
  3054. struct cdp_peer_ext_stats *pext_stats = NULL;
  3055. soc = pdev->soc;
  3056. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3057. return;
  3058. pext_stats = peer->pext_stats;
  3059. qdf_assert(pext_stats);
3060. qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3061. /*
3062. * For non-TID packets use the highest data TID (CDP_MAX_DATA_TIDS - 1)
  3063. */
  3064. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3065. tid = CDP_MAX_DATA_TIDS - 1;
  3066. dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
  3067. tx_desc);
  3068. }
  3069. #else
  3070. static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  3071. struct dp_tx_desc_s *tx_desc,
  3072. uint8_t tid, uint8_t ring_id)
  3073. {
  3074. }
  3075. #endif
  3076. /**
3077. * dp_tx_compute_delay() - Compute software enqueue, HW transmit and
3078. * interframe delays for a completed frame
3079. *
3080. * @vdev: vdev handle
  3081. * @tx_desc: tx descriptor
  3082. * @tid: tid value
  3083. * @ring_id: TCL or WBM ring number for transmit path
  3084. * Return: none
  3085. */
  3086. static void dp_tx_compute_delay(struct dp_vdev *vdev,
  3087. struct dp_tx_desc_s *tx_desc,
  3088. uint8_t tid, uint8_t ring_id)
  3089. {
  3090. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3091. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3092. if (qdf_likely(!vdev->pdev->delay_stats_flag))
  3093. return;
  3094. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3095. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3096. timestamp_hw_enqueue = tx_desc->timestamp;
  3097. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3098. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3099. timestamp_hw_enqueue);
  3100. interframe_delay = (uint32_t)(timestamp_ingress -
  3101. vdev->prev_tx_enq_tstamp);
  3102. /*
  3103. * Delay in software enqueue
  3104. */
  3105. dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
  3106. CDP_DELAY_STATS_SW_ENQ, ring_id);
  3107. /*
  3108. * Delay between packet enqueued to HW and Tx completion
  3109. */
  3110. dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
  3111. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
  3112. /*
  3113. * Update interframe delay stats calculated at hardstart receive point.
  3114. * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3115. * interframe delay will not be calculated correctly for the 1st frame.
3116. * On the other hand, this avoids an extra per-packet check
3117. * of !vdev->prev_tx_enq_tstamp.
  3118. */
  3119. dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
  3120. CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
  3121. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  3122. }
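/*
 * Worked example (illustrative numbers): with timestamp_ingress = 100 ms,
 * timestamp_hw_enqueue = 103 ms and a completion seen at
 * current_timestamp = 110 ms, dp_tx_compute_delay() records
 * sw_enqueue_delay = 3 ms and fwhw_transmit_delay = 7 ms; the next
 * frame's interframe_delay is measured against this frame's 100 ms
 * ingress timestamp.
 */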
  3123. #ifdef DISABLE_DP_STATS
  3124. static
  3125. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
  3126. {
  3127. }
  3128. #else
  3129. static
  3130. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
  3131. {
  3132. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  3133. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  3134. if (subtype != QDF_PROTO_INVALID)
  3135. DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
  3136. }
  3137. #endif
  3138. /**
  3139. * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
  3140. * per wbm ring
  3141. *
  3142. * @tx_desc: software descriptor head pointer
  3143. * @ts: Tx completion status
  3144. * @peer: peer handle
  3145. * @ring_id: ring number
  3146. *
  3147. * Return: None
  3148. */
  3149. static inline void
  3150. dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
  3151. struct hal_tx_completion_status *ts,
  3152. struct dp_peer *peer, uint8_t ring_id)
  3153. {
  3154. struct dp_pdev *pdev = peer->vdev->pdev;
  3155. struct dp_soc *soc = NULL;
  3156. uint8_t mcs, pkt_type;
  3157. uint8_t tid = ts->tid;
  3158. uint32_t length;
  3159. struct cdp_tid_tx_stats *tid_stats;
  3160. if (!pdev)
  3161. return;
  3162. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3163. tid = CDP_MAX_DATA_TIDS - 1;
  3164. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3165. soc = pdev->soc;
  3166. mcs = ts->mcs;
  3167. pkt_type = ts->pkt_type;
  3168. if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
  3169. dp_err("Release source is not from TQM");
  3170. return;
  3171. }
  3172. length = qdf_nbuf_len(tx_desc->nbuf);
  3173. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
  3174. if (qdf_unlikely(pdev->delay_stats_flag))
  3175. dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
  3176. DP_STATS_INCC(peer, tx.dropped.age_out, 1,
  3177. (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
  3178. DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
  3179. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  3180. DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
  3181. (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
  3182. DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
  3183. (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
  3184. DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
  3185. (ts->status == HAL_TX_TQM_RR_FW_REASON1));
  3186. DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
  3187. (ts->status == HAL_TX_TQM_RR_FW_REASON2));
  3188. DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
  3189. (ts->status == HAL_TX_TQM_RR_FW_REASON3));
  3190. /*
  3191. * tx_failed is ideally supposed to be updated from HTT ppdu completion
  3192. * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
  3193. * are no completions for failed cases. Hence updating tx_failed from
  3194. * data path. Please note that if tx_failed is fixed to be from ppdu,
  3195. * then this has to be removed
  3196. */
  3197. peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
  3198. peer->stats.tx.dropped.fw_rem_notx +
  3199. peer->stats.tx.dropped.fw_rem_tx +
  3200. peer->stats.tx.dropped.age_out +
  3201. peer->stats.tx.dropped.fw_reason1 +
  3202. peer->stats.tx.dropped.fw_reason2 +
  3203. peer->stats.tx.dropped.fw_reason3;
  3204. if (ts->status < CDP_MAX_TX_TQM_STATUS) {
  3205. tid_stats->tqm_status_cnt[ts->status]++;
  3206. }
  3207. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  3208. dp_update_no_ack_stats(tx_desc->nbuf, peer);
  3209. return;
  3210. }
  3211. DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
  3212. DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
  3213. DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
  3214. /*
  3215. * Following Rate Statistics are updated from HTT PPDU events from FW.
  3216. * Return from here if HTT PPDU events are enabled.
  3217. */
  3218. if (!(soc->process_tx_status))
  3219. return;
  3220. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3221. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  3222. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3223. ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
  3224. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3225. ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  3226. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3227. ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
  3228. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3229. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  3230. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3231. ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
  3232. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3233. ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  3234. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3235. ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  3236. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3237. ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
  3238. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3239. ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
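/*
 * Illustrative reading of the INCC pattern above: per pkt_type, an MCS at
 * or above the table limit lands in the overflow bucket
 * mcs_count[MAX_MCS - 1], while an in-range MCS increments its own bucket.
 */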
  3240. DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
  3241. DP_STATS_INC(peer, tx.bw[ts->bw], 1);
  3242. DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
  3243. DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
  3244. DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
  3245. DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
  3246. DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
  3247. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  3248. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
  3249. &peer->stats, ts->peer_id,
  3250. UPDATE_PEER_STATS, pdev->pdev_id);
  3251. #endif
  3252. }
  3253. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  3254. /**
  3255. * dp_tx_flow_pool_lock() - take flow pool lock
  3256. * @soc: core txrx main context
  3257. * @tx_desc: tx desc
  3258. *
  3259. * Return: None
  3260. */
  3261. static inline
  3262. void dp_tx_flow_pool_lock(struct dp_soc *soc,
  3263. struct dp_tx_desc_s *tx_desc)
  3264. {
  3265. struct dp_tx_desc_pool_s *pool;
  3266. uint8_t desc_pool_id;
  3267. desc_pool_id = tx_desc->pool_id;
  3268. pool = &soc->tx_desc[desc_pool_id];
  3269. qdf_spin_lock_bh(&pool->flow_pool_lock);
  3270. }
  3271. /**
  3272. * dp_tx_flow_pool_unlock() - release flow pool lock
  3273. * @soc: core txrx main context
  3274. * @tx_desc: tx desc
  3275. *
  3276. * Return: None
  3277. */
  3278. static inline
  3279. void dp_tx_flow_pool_unlock(struct dp_soc *soc,
  3280. struct dp_tx_desc_s *tx_desc)
  3281. {
  3282. struct dp_tx_desc_pool_s *pool;
  3283. uint8_t desc_pool_id;
  3284. desc_pool_id = tx_desc->pool_id;
  3285. pool = &soc->tx_desc[desc_pool_id];
  3286. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  3287. }
  3288. #else
  3289. static inline
  3290. void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3291. {
  3292. }
  3293. static inline
  3294. void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3295. {
  3296. }
  3297. #endif
  3298. /**
  3299. * dp_tx_notify_completion() - Notify tx completion for this desc
  3300. * @soc: core txrx main context
  3301. * @vdev: datapath vdev handle
  3302. * @tx_desc: tx desc
  3303. * @netbuf: buffer
  3304. * @status: tx status
  3305. *
  3306. * Return: none
  3307. */
  3308. static inline void dp_tx_notify_completion(struct dp_soc *soc,
  3309. struct dp_vdev *vdev,
  3310. struct dp_tx_desc_s *tx_desc,
  3311. qdf_nbuf_t netbuf,
  3312. uint8_t status)
  3313. {
  3314. void *osif_dev;
  3315. ol_txrx_completion_fp tx_compl_cbk = NULL;
  3316. uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
  3317. qdf_assert(tx_desc);
  3318. dp_tx_flow_pool_lock(soc, tx_desc);
  3319. if (!vdev ||
  3320. !vdev->osif_vdev) {
  3321. dp_tx_flow_pool_unlock(soc, tx_desc);
  3322. return;
  3323. }
  3324. osif_dev = vdev->osif_vdev;
  3325. tx_compl_cbk = vdev->tx_comp;
  3326. dp_tx_flow_pool_unlock(soc, tx_desc);
  3327. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3328. flag |= BIT(QDF_TX_RX_STATUS_OK);
  3329. if (tx_compl_cbk)
  3330. tx_compl_cbk(netbuf, osif_dev, flag);
  3331. }
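/*
 * Example (illustrative sketch): shape of an OSIF completion callback as
 * invoked through vdev->tx_comp above. The name and body are
 * hypothetical; the signature is inferred from the call in
 * dp_tx_notify_completion().
 */
static void osif_tx_compl_example(qdf_nbuf_t netbuf, void *osif_dev,
				  uint16_t flag)
{
	if (flag & BIT(QDF_TX_RX_STATUS_OK)) {
		/* frame was acked by the peer; e.g. bump a per-netdev
		 * ack counter hanging off osif_dev */
	}
}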
3332. /**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
3333. * @pdev: pdev handle
 * @peer: peer handle
  3334. * @tid: tid value
  3335. * @txdesc_ts: timestamp from txdesc
  3336. * @ppdu_id: ppdu id
  3337. *
  3338. * Return: none
  3339. */
  3340. #ifdef FEATURE_PERPKT_INFO
  3341. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3342. struct dp_peer *peer,
  3343. uint8_t tid,
  3344. uint64_t txdesc_ts,
  3345. uint32_t ppdu_id)
  3346. {
  3347. uint64_t delta_ms;
  3348. struct cdp_tx_sojourn_stats *sojourn_stats;
  3349. if (qdf_unlikely(pdev->enhanced_stats_en == 0))
  3350. return;
  3351. if (qdf_unlikely(tid == HTT_INVALID_TID ||
  3352. tid >= CDP_DATA_TID_MAX))
  3353. return;
  3354. if (qdf_unlikely(!pdev->sojourn_buf))
  3355. return;
  3356. sojourn_stats = (struct cdp_tx_sojourn_stats *)
  3357. qdf_nbuf_data(pdev->sojourn_buf);
  3358. sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
  3359. delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
  3360. txdesc_ts;
  3361. qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
  3362. delta_ms);
  3363. sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
  3364. sojourn_stats->num_msdus[tid] = 1;
  3365. sojourn_stats->avg_sojourn_msdu[tid].internal =
  3366. peer->avg_sojourn_msdu[tid].internal;
  3367. dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
  3368. pdev->sojourn_buf, HTT_INVALID_PEER,
  3369. WDI_NO_VAL, pdev->pdev_id);
  3370. sojourn_stats->sum_sojourn_msdu[tid] = 0;
  3371. sojourn_stats->num_msdus[tid] = 0;
  3372. sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
  3373. }
  3374. #else
  3375. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3376. struct dp_peer *peer,
  3377. uint8_t tid,
  3378. uint64_t txdesc_ts,
  3379. uint32_t ppdu_id)
  3380. {
  3381. }
  3382. #endif
  3383. #ifdef WLAN_FEATURE_PKT_CAPTURE_LITHIUM
  3384. /**
  3385. * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
  3386. * @soc: dp_soc handle
  3387. * @desc: Tx Descriptor
  3388. * @ts: HAL Tx completion descriptor contents
  3389. *
3390. * This function is used to send the tx completion to packet capture
 *
 * Return: none
3391. */
  3392. void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
  3393. struct dp_tx_desc_s *desc,
  3394. struct hal_tx_completion_status *ts)
  3395. {
  3396. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
  3397. desc, ts->peer_id,
  3398. WDI_NO_VAL, desc->pdev->pdev_id);
  3399. }
  3400. #endif
  3401. /**
  3402. * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
  3403. * @soc: DP Soc handle
3404. * @desc: software Tx descriptor
3405. * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
  3406. *
  3407. * Return: none
  3408. */
  3409. static inline void
  3410. dp_tx_comp_process_desc(struct dp_soc *soc,
  3411. struct dp_tx_desc_s *desc,
  3412. struct hal_tx_completion_status *ts,
  3413. struct dp_peer *peer)
  3414. {
  3415. uint64_t time_latency = 0;
  3416. /*
  3417. * m_copy/tx_capture modes are not supported for
  3418. * scatter gather packets
  3419. */
  3420. if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
  3421. time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
  3422. desc->timestamp);
  3423. }
  3424. dp_send_completion_to_pkt_capture(soc, desc, ts);
  3425. if (!(desc->msdu_ext_desc)) {
  3426. if (QDF_STATUS_SUCCESS ==
  3427. dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
  3428. return;
  3429. }
  3430. if (QDF_STATUS_SUCCESS ==
  3431. dp_get_completion_indication_for_stack(soc,
  3432. desc->pdev,
  3433. peer, ts,
  3434. desc->nbuf,
  3435. time_latency)) {
  3436. qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
  3437. QDF_DMA_TO_DEVICE,
  3438. desc->nbuf->len);
  3439. dp_send_completion_to_stack(soc,
  3440. desc->pdev,
  3441. ts->peer_id,
  3442. ts->ppdu_id,
  3443. desc->nbuf);
  3444. return;
  3445. }
  3446. }
  3447. dp_tx_comp_free_buf(soc, desc);
  3448. }
  3449. #ifdef DISABLE_DP_STATS
  3450. /**
  3451. * dp_tx_update_connectivity_stats() - update tx connectivity stats
3452. * @soc: core txrx main context
 * @vdev: datapath vdev handle
3453. * @tx_desc: tx desc
  3454. * @status: tx status
  3455. *
  3456. * Return: none
  3457. */
  3458. static inline
  3459. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  3460. struct dp_vdev *vdev,
  3461. struct dp_tx_desc_s *tx_desc,
  3462. uint8_t status)
  3463. {
  3464. }
  3465. #else
  3466. static inline
  3467. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  3468. struct dp_vdev *vdev,
  3469. struct dp_tx_desc_s *tx_desc,
  3470. uint8_t status)
  3471. {
  3472. void *osif_dev;
  3473. ol_txrx_stats_rx_fp stats_cbk;
  3474. uint8_t pkt_type;
  3475. qdf_assert(tx_desc);
  3476. if (!vdev ||
  3477. !vdev->osif_vdev ||
  3478. !vdev->stats_cb)
  3479. return;
  3480. osif_dev = vdev->osif_vdev;
  3481. stats_cbk = vdev->stats_cb;
  3482. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
  3483. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3484. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
  3485. &pkt_type);
  3486. }
  3487. #endif
  3488. /**
  3489. * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
  3490. * @soc: DP soc handle
  3491. * @tx_desc: software descriptor head pointer
  3492. * @ts: Tx completion status
  3493. * @peer: peer handle
  3494. * @ring_id: ring number
  3495. *
  3496. * Return: none
  3497. */
  3498. static inline
  3499. void dp_tx_comp_process_tx_status(struct dp_soc *soc,
  3500. struct dp_tx_desc_s *tx_desc,
  3501. struct hal_tx_completion_status *ts,
  3502. struct dp_peer *peer, uint8_t ring_id)
  3503. {
  3504. uint32_t length;
  3505. qdf_ether_header_t *eh;
  3506. struct dp_vdev *vdev = NULL;
  3507. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3508. enum qdf_dp_tx_rx_status dp_status;
  3509. if (!nbuf) {
  3510. dp_info_rl("invalid tx descriptor. nbuf NULL");
  3511. goto out;
  3512. }
  3513. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  3514. length = qdf_nbuf_len(nbuf);
  3515. dp_status = dp_tx_hw_to_qdf(ts->status);
  3516. DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
  3517. QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
  3518. QDF_TRACE_DEFAULT_PDEV_ID,
  3519. qdf_nbuf_data_addr(nbuf),
  3520. sizeof(qdf_nbuf_data(nbuf)),
  3521. tx_desc->id, ts->status, dp_status));
  3522. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  3523. "-------------------- \n"
  3524. "Tx Completion Stats: \n"
  3525. "-------------------- \n"
  3526. "ack_frame_rssi = %d \n"
  3527. "first_msdu = %d \n"
  3528. "last_msdu = %d \n"
  3529. "msdu_part_of_amsdu = %d \n"
  3530. "rate_stats valid = %d \n"
  3531. "bw = %d \n"
  3532. "pkt_type = %d \n"
  3533. "stbc = %d \n"
  3534. "ldpc = %d \n"
  3535. "sgi = %d \n"
  3536. "mcs = %d \n"
  3537. "ofdma = %d \n"
  3538. "tones_in_ru = %d \n"
  3539. "tsf = %d \n"
  3540. "ppdu_id = %d \n"
  3541. "transmit_cnt = %d \n"
  3542. "tid = %d \n"
  3543. "peer_id = %d\n",
  3544. ts->ack_frame_rssi, ts->first_msdu,
  3545. ts->last_msdu, ts->msdu_part_of_amsdu,
  3546. ts->valid, ts->bw, ts->pkt_type, ts->stbc,
  3547. ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
  3548. ts->tones_in_ru, ts->tsf, ts->ppdu_id,
  3549. ts->transmit_cnt, ts->tid, ts->peer_id);
  3550. /* Update SoC level stats */
  3551. DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
  3552. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  3553. if (!peer) {
  3554. dp_info_rl("peer is null or deletion in progress");
  3555. DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
  3556. goto out;
  3557. }
  3558. vdev = peer->vdev;
  3559. dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
  3560. /* Update per-packet stats for mesh mode */
  3561. if (qdf_unlikely(vdev->mesh_vdev) &&
  3562. !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
  3563. dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
  3564. /* Update peer level stats */
  3565. if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
  3566. if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
  3567. DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
  3568. if ((peer->vdev->tx_encap_type ==
  3569. htt_cmn_pkt_type_ethernet) &&
  3570. QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  3571. DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
  3572. }
  3573. }
  3574. } else {
  3575. DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
  3576. if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
  3577. DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
  3578. if (qdf_unlikely(peer->in_twt)) {
  3579. DP_STATS_INC_PKT(peer,
  3580. tx.tx_success_twt,
  3581. 1, length);
  3582. }
  3583. }
  3584. }
  3585. dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
  3586. dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
  3587. #ifdef QCA_SUPPORT_RDK_STATS
  3588. if (soc->rdkstats_enabled)
  3589. dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
  3590. tx_desc->timestamp,
  3591. ts->ppdu_id);
  3592. #endif
  3593. out:
  3594. return;
  3595. }
  3596. /**
  3597. * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
  3598. * @soc: core txrx main context
  3599. * @comp_head: software descriptor head pointer
  3600. * @ring_id: ring number
  3601. *
  3602. * This function will process batch of descriptors reaped by dp_tx_comp_handler
  3603. * and release the software descriptors after processing is complete
  3604. *
  3605. * Return: none
  3606. */
  3607. static void
  3608. dp_tx_comp_process_desc_list(struct dp_soc *soc,
  3609. struct dp_tx_desc_s *comp_head, uint8_t ring_id)
  3610. {
  3611. struct dp_tx_desc_s *desc;
  3612. struct dp_tx_desc_s *next;
  3613. struct hal_tx_completion_status ts;
  3614. struct dp_peer *peer = NULL;
  3615. uint16_t peer_id = DP_INVALID_PEER;
  3616. qdf_nbuf_t netbuf;
  3617. desc = comp_head;
  3618. while (desc) {
  3619. if (peer_id != desc->peer_id) {
  3620. if (peer)
  3621. dp_peer_unref_delete(peer,
  3622. DP_MOD_ID_TX_COMP);
  3623. peer_id = desc->peer_id;
  3624. peer = dp_peer_get_ref_by_id(soc, peer_id,
  3625. DP_MOD_ID_TX_COMP);
  3626. }
  3627. if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  3628. struct dp_pdev *pdev = desc->pdev;
  3629. if (qdf_likely(peer)) {
  3630. /*
  3631. * Increment peer statistics
  3632. * Minimal statistics update done here
  3633. */
  3634. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
  3635. desc->length);
  3636. if (desc->tx_status !=
  3637. HAL_TX_TQM_RR_FRAME_ACKED)
  3638. DP_STATS_INC(peer, tx.tx_failed, 1);
  3639. }
  3640. qdf_assert(pdev);
  3641. dp_tx_outstanding_dec(pdev);
  3642. /*
3643. * Calling a QDF wrapper here creates a significant
3644. * performance impact, so the wrapper call is avoided here
  3645. */
  3646. next = desc->next;
  3647. qdf_mem_unmap_nbytes_single(soc->osdev,
  3648. desc->dma_addr,
  3649. QDF_DMA_TO_DEVICE,
  3650. desc->length);
  3651. qdf_nbuf_free(desc->nbuf);
  3652. dp_tx_desc_free(soc, desc, desc->pool_id);
  3653. desc = next;
  3654. continue;
  3655. }
  3656. hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
  3657. dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
  3658. netbuf = desc->nbuf;
  3659. /* check tx complete notification */
  3660. if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
  3661. dp_tx_notify_completion(soc, peer->vdev, desc,
  3662. netbuf, ts.status);
  3663. dp_tx_comp_process_desc(soc, desc, &ts, peer);
  3664. next = desc->next;
  3665. dp_tx_desc_release(desc, desc->pool_id);
  3666. desc = next;
  3667. }
  3668. if (peer)
  3669. dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
  3670. }
  3671. /**
  3672. * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
  3673. * @soc: Handle to DP soc structure
  3674. * @tx_desc: software descriptor head pointer
  3675. * @status : Tx completion status from HTT descriptor
  3676. * @ring_id: ring number
  3677. *
  3678. * This function will process HTT Tx indication messages from Target
  3679. *
  3680. * Return: none
  3681. */
  3682. static
  3683. void dp_tx_process_htt_completion(struct dp_soc *soc,
  3684. struct dp_tx_desc_s *tx_desc, uint8_t *status,
  3685. uint8_t ring_id)
  3686. {
  3687. uint8_t tx_status;
  3688. struct dp_pdev *pdev;
  3689. struct dp_vdev *vdev;
  3690. struct hal_tx_completion_status ts = {0};
  3691. uint32_t *htt_desc = (uint32_t *)status;
  3692. struct dp_peer *peer;
  3693. struct cdp_tid_tx_stats *tid_stats = NULL;
  3694. struct htt_soc *htt_handle;
  3695. uint8_t vdev_id;
  3696. tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
  3697. htt_handle = (struct htt_soc *)soc->htt_handle;
  3698. htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
  3699. /*
3700. * There can be a scenario where the WBM descriptor enqueued
3701. * from TQM2WBM is consumed first, i.e. the TQM completion can
3702. * happen before the MEC notification comes from FW2WBM. Avoid
3703. * accessing any field of the tx descriptor in case of MEC notify.
  3704. */
  3705. if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
  3706. /*
  3707. * Get vdev id from HTT status word in case of MEC
  3708. * notification
  3709. */
  3710. vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
  3711. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3712. return;
  3713. vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3714. DP_MOD_ID_HTT_COMP);
  3715. if (!vdev)
  3716. return;
  3717. dp_tx_mec_handler(vdev, status);
  3718. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
  3719. return;
  3720. }
  3721. /*
3722. * If the descriptor is already freed in vdev_detach,
3723. * skip processing this descriptor
  3724. */
  3725. if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
  3726. QDF_TRACE(QDF_MODULE_ID_DP,
  3727. QDF_TRACE_LEVEL_INFO,
  3728. "Descriptor freed in vdev_detach %d",
  3729. tx_desc->id);
  3730. return;
  3731. }
  3732. pdev = tx_desc->pdev;
  3733. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  3734. QDF_TRACE(QDF_MODULE_ID_DP,
  3735. QDF_TRACE_LEVEL_INFO,
  3736. "pdev in down state %d",
  3737. tx_desc->id);
  3738. dp_tx_comp_free_buf(soc, tx_desc);
  3739. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3740. return;
  3741. }
  3742. qdf_assert(tx_desc->pdev);
  3743. vdev_id = tx_desc->vdev_id;
  3744. vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3745. DP_MOD_ID_HTT_COMP);
  3746. if (!vdev)
  3747. return;
  3748. switch (tx_status) {
  3749. case HTT_TX_FW2WBM_TX_STATUS_OK:
  3750. case HTT_TX_FW2WBM_TX_STATUS_DROP:
  3751. case HTT_TX_FW2WBM_TX_STATUS_TTL:
  3752. {
  3753. uint8_t tid;
  3754. if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
  3755. ts.peer_id =
  3756. HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
  3757. htt_desc[2]);
  3758. ts.tid =
  3759. HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
  3760. htt_desc[2]);
  3761. } else {
  3762. ts.peer_id = HTT_INVALID_PEER;
  3763. ts.tid = HTT_INVALID_TID;
  3764. }
  3765. ts.ppdu_id =
  3766. HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
  3767. htt_desc[1]);
  3768. ts.ack_frame_rssi =
  3769. HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
  3770. htt_desc[1]);
  3771. ts.tsf = htt_desc[3];
  3772. ts.first_msdu = 1;
  3773. ts.last_msdu = 1;
  3774. tid = ts.tid;
  3775. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3776. tid = CDP_MAX_DATA_TIDS - 1;
  3777. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3778. if (qdf_unlikely(pdev->delay_stats_flag))
  3779. dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
  3780. if (tx_status < CDP_MAX_TX_HTT_STATUS) {
  3781. tid_stats->htt_status_cnt[tx_status]++;
  3782. }
  3783. peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
  3784. DP_MOD_ID_HTT_COMP);
  3785. dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
  3786. dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
  3787. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3788. if (qdf_likely(peer))
  3789. dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
  3790. break;
  3791. }
  3792. case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
  3793. {
  3794. dp_tx_reinject_handler(soc, vdev, tx_desc, status);
  3795. break;
  3796. }
  3797. case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
  3798. {
  3799. dp_tx_inspect_handler(soc, vdev, tx_desc, status);
  3800. break;
  3801. }
  3802. default:
  3803. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  3804. "%s Invalid HTT tx_status %d\n",
  3805. __func__, tx_status);
  3806. break;
  3807. }
  3808. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
  3809. }
  3810. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  3811. static inline
  3812. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
  3813. {
  3814. bool limit_hit = false;
  3815. struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
  3816. limit_hit =
  3817. (num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;
  3818. if (limit_hit)
  3819. DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
  3820. return limit_hit;
  3821. }
  3822. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  3823. {
  3824. return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
  3825. }
  3826. #else
  3827. static inline
  3828. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
  3829. {
  3830. return false;
  3831. }
  3832. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  3833. {
  3834. return false;
  3835. }
  3836. #endif
  3837. uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
  3838. hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
  3839. uint32_t quota)
  3840. {
  3841. void *tx_comp_hal_desc;
  3842. uint8_t buffer_src;
  3843. uint8_t pool_id;
  3844. uint32_t tx_desc_id;
  3845. struct dp_tx_desc_s *tx_desc = NULL;
  3846. struct dp_tx_desc_s *head_desc = NULL;
  3847. struct dp_tx_desc_s *tail_desc = NULL;
  3848. uint32_t num_processed = 0;
  3849. uint32_t count;
  3850. uint32_t num_avail_for_reap = 0;
  3851. bool force_break = false;
  3852. DP_HIST_INIT();
  3853. more_data:
  3854. /* Re-initialize local variables to be re-used */
  3855. head_desc = NULL;
  3856. tail_desc = NULL;
  3857. count = 0;
  3858. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  3859. dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
  3860. return 0;
  3861. }
  3862. num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
  3863. if (num_avail_for_reap >= quota)
  3864. num_avail_for_reap = quota;
  3865. dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
  3866. /* Find head descriptor from completion ring */
  3867. while (qdf_likely(num_avail_for_reap)) {
  3868. tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
  3869. if (qdf_unlikely(!tx_comp_hal_desc))
  3870. break;
  3871. buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);
3872. /* If this buffer was not released by TQM or FW, then it is not
3873. * a Tx completion indication; assert */
  3874. if (qdf_unlikely(buffer_src !=
  3875. HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
  3876. (qdf_unlikely(buffer_src !=
  3877. HAL_TX_COMP_RELEASE_SOURCE_FW))) {
  3878. uint8_t wbm_internal_error;
  3879. dp_err_rl(
  3880. "Tx comp release_src != TQM | FW but from %d",
  3881. buffer_src);
  3882. hal_dump_comp_desc(tx_comp_hal_desc);
  3883. DP_STATS_INC(soc, tx.invalid_release_source, 1);
  3884. /* When WBM sees NULL buffer_addr_info in any of
  3885. * ingress rings it sends an error indication,
  3886. * with wbm_internal_error=1, to a specific ring.
  3887. * The WBM2SW ring used to indicate these errors is
  3888. * fixed in HW, and that ring is being used as Tx
  3889. * completion ring. These errors are not related to
  3890. * Tx completions, and should just be ignored
  3891. */
  3892. wbm_internal_error = hal_get_wbm_internal_error(
  3893. soc->hal_soc,
  3894. tx_comp_hal_desc);
  3895. if (wbm_internal_error) {
  3896. dp_err_rl("Tx comp wbm_internal_error!!");
  3897. DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
  3898. if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
  3899. buffer_src)
  3900. dp_handle_wbm_internal_error(
  3901. soc,
  3902. tx_comp_hal_desc,
  3903. hal_tx_comp_get_buffer_type(
  3904. tx_comp_hal_desc));
  3905. } else {
  3906. dp_err_rl("Tx comp wbm_internal_error false");
  3907. DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
  3908. }
  3909. continue;
  3910. }
  3911. /* Get descriptor id */
  3912. tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
  3913. pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
  3914. DP_TX_DESC_ID_POOL_OS;
  3915. /* Find Tx descriptor */
  3916. tx_desc = dp_tx_desc_find(soc, pool_id,
  3917. (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
  3918. DP_TX_DESC_ID_PAGE_OS,
  3919. (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
  3920. DP_TX_DESC_ID_OFFSET_OS);
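/*
 * Illustrative layout (assumption): the completion cookie packs
 * pool/page/offset as
 *   id = (pool << DP_TX_DESC_ID_POOL_OS) |
 *        (page << DP_TX_DESC_ID_PAGE_OS) |
 *        (offset << DP_TX_DESC_ID_OFFSET_OS)
 * which the mask/shift pairs above simply invert.
 */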
  3921. /*
  3922. * If the release source is FW, process the HTT status
  3923. */
  3924. if (qdf_unlikely(buffer_src ==
  3925. HAL_TX_COMP_RELEASE_SOURCE_FW)) {
  3926. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  3927. hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
  3928. htt_tx_status);
  3929. dp_tx_process_htt_completion(soc, tx_desc,
  3930. htt_tx_status, ring_id);
  3931. } else {
  3932. tx_desc->peer_id =
  3933. hal_tx_comp_get_peer_id(tx_comp_hal_desc);
  3934. tx_desc->tx_status =
  3935. hal_tx_comp_get_tx_status(tx_comp_hal_desc);
  3936. /*
3937. * If the fast completion mode is enabled, extended
3938. * metadata from the descriptor is not copied
  3939. */
  3940. if (qdf_likely(tx_desc->flags &
  3941. DP_TX_DESC_FLAG_SIMPLE))
  3942. goto add_to_pool;
  3943. /*
  3944. * If the descriptor is already freed in vdev_detach,
  3945. * continue to next descriptor
  3946. */
  3947. if (qdf_unlikely
  3948. ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
  3949. !tx_desc->flags)) {
  3950. dp_tx_comp_info("Descriptor freed in vdev_detach %d",
  3951. tx_desc_id);
  3952. continue;
  3953. }
  3954. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  3955. dp_tx_comp_info("pdev in down state %d",
  3956. tx_desc_id);
  3957. dp_tx_comp_free_buf(soc, tx_desc);
  3958. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3959. goto next_desc;
  3960. }
3961. /* Pool id mismatch. Error */
  3962. if (tx_desc->pool_id != pool_id) {
  3963. dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
  3964. pool_id, tx_desc->pool_id);
  3965. qdf_assert_always(0);
  3966. }
  3967. if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
  3968. !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
  3969. dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
  3970. tx_desc->flags, tx_desc_id);
  3971. qdf_assert_always(0);
  3972. }
  3973. /* Collect hw completion contents */
  3974. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  3975. &tx_desc->comp, 1);
  3976. add_to_pool:
  3977. DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
  3978. /* First ring descriptor on the cycle */
  3979. if (!head_desc) {
  3980. head_desc = tx_desc;
  3981. tail_desc = tx_desc;
  3982. }
  3983. tail_desc->next = tx_desc;
  3984. tx_desc->next = NULL;
  3985. tail_desc = tx_desc;
  3986. }
  3987. next_desc:
  3988. num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
  3989. /*
3990. * If the processed packet count exceeds the given quota,
3991. * stop processing
  3992. */
  3993. count++;
  3994. if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
  3995. break;
  3996. }
  3997. dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
  3998. /* Process the reaped descriptors */
  3999. if (head_desc)
  4000. dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
  4001. if (dp_tx_comp_enable_eol_data_check(soc)) {
  4002. if (num_processed >= quota)
  4003. force_break = true;
  4004. if (!force_break &&
  4005. hal_srng_dst_peek_sync_locked(soc->hal_soc,
  4006. hal_ring_hdl)) {
  4007. DP_STATS_INC(soc, tx.hp_oos2, 1);
  4008. if (!hif_exec_should_yield(soc->hif_handle,
  4009. int_ctx->dp_intr_id))
  4010. goto more_data;
  4011. }
  4012. }
  4013. DP_TX_HIST_STATS_PER_PDEV();
  4014. return num_processed;
  4015. }
  4016. #ifdef FEATURE_WLAN_TDLS
  4017. qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4018. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
  4019. {
  4020. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4021. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  4022. DP_MOD_ID_TDLS);
  4023. if (!vdev) {
  4024. dp_err("vdev handle for id %d is NULL", vdev_id);
  4025. return NULL;
  4026. }
  4027. if (tx_spec & OL_TX_SPEC_NO_FREE)
  4028. vdev->is_tdls_frame = true;
  4029. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  4030. return dp_tx_send(soc_hdl, vdev_id, msdu_list);
  4031. }
  4032. #endif
  4033. static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
  4034. {
  4035. struct wlan_cfg_dp_soc_ctxt *cfg;
  4036. struct dp_soc *soc;
  4037. soc = vdev->pdev->soc;
  4038. if (!soc)
  4039. return;
  4040. cfg = soc->wlan_cfg_ctx;
  4041. if (!cfg)
  4042. return;
  4043. if (vdev->opmode == wlan_op_mode_ndi)
  4044. vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
  4045. else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
  4046. (vdev->subtype == wlan_op_subtype_p2p_cli) ||
  4047. (vdev->subtype == wlan_op_subtype_p2p_go))
  4048. vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
  4049. else
  4050. vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
  4051. }
  4052. /**
  4053. * dp_tx_vdev_attach() - attach vdev to dp tx
  4054. * @vdev: virtual device instance
  4055. *
  4056. * Return: QDF_STATUS_SUCCESS: success
  4057. * QDF_STATUS_E_RESOURCES: Error return
  4058. */
  4059. QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
  4060. {
  4061. int pdev_id;
  4062. /*
  4063. * Fill HTT TCL Metadata with Vdev ID and MAC ID
  4064. */
  4065. HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
  4066. HTT_TCL_METADATA_TYPE_VDEV_BASED);
  4067. HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
  4068. vdev->vdev_id);
  4069. pdev_id =
  4070. dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
  4071. vdev->pdev->pdev_id);
  4072. HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
  4073. /*
  4074. * Set HTT Extension Valid bit to 0 by default
  4075. */
  4076. HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
  4077. dp_tx_vdev_update_search_flags(vdev);
  4078. dp_tx_vdev_update_feature_flags(vdev);
  4079. return QDF_STATUS_SUCCESS;
  4080. }
  4081. #ifndef FEATURE_WDS
  4082. static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
  4083. {
  4084. return false;
  4085. }
  4086. #endif
/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Set the search type only when peer map v2 messaging is enabled,
	 * as the search index (AST hash) is available only when v2 is
	 * enabled.
	 */
	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
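/*
 * Illustrative summary (derived from the logic above, not additional
 * driver code): the address-search configuration per vdev mode.
 *
 *	vdev mode                    hal_desc_addr_search_flags
 *	--------------------------   ---------------------------
 *	STA with TDLS link           ADDRX_EN | ADDRY_EN
 *	STA without DA override      ADDRY_EN
 *	all other modes              ADDRX_EN
 *
 * search_type becomes HAL_TX_ADDR_INDEX_SEARCH only for STA vdevs on
 * targets with peer map/unmap v2; everything else keeps the default.
 */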
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, match only descriptors belonging to that
	 * vdev. If vdev is NULL, match any descriptor on this pdev.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		(tx_desc->pdev == pdev);
}
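/*
 * Illustrative sketch (not driver code): the two scopes the predicate
 * supports, with "desc" standing in for any allocated descriptor.
 *
 *	dp_is_tx_desc_flush_match(pdev, vdev, desc);	// vdev-scoped
 *	dp_is_tx_desc_flush_match(pdev, NULL, desc);	// pdev-wide
 */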
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated with a TX desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 *        NULL: no specific Vdev is required; check all allocated TX
 *        descriptors on this pdev.
 *        Non-NULL: only check the allocated TX descriptors associated
 *        with this Vdev.
 * @force_free:
 *        true: flush the TX descriptors.
 *        false: only reset the Vdev in each allocated TX descriptor
 *        associated with the current Vdev.
 *
 * This function goes through the TX descriptor pool to flush
 * outstanding TX data or to reset the Vdev in the associated TX
 * descriptors.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock in case the pool gets freed
		 * because all tx_desc were recycled while TX completion
		 * was being handled. This is not necessary for a force
		 * flush because:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it, and
		 * b. dp interrupts are already disabled before the force
		 *    TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if a force free is
				 * required, otherwise only reset the
				 * vdev in this TX desc.
				 */
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
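/*
 * Worked example (illustrative): with num_element_per_page == 64 (a
 * hypothetical value; the real count comes from
 * desc_pages.num_element_per_page), descriptor j == 130 resolves to
 * page_id = 130 / 64 = 2 and offset = 130 % 64 = 2, which is exactly
 * what dp_tx_desc_find() receives in the loop above.
 */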
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */

/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}
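/*
 * Illustrative sketch (not driver code): the two flush scopes implied
 * above. Vdev detach only unbinds descriptors (force_free == false);
 * a full flush with force_free == true is reserved for pdev deinit,
 * where dp interrupts are already disabled.
 *
 *	dp_tx_desc_flush(pdev, vdev, false);	// vdev detach: unbind
 *	dp_tx_desc_flush(pdev, NULL, true);	// pdev deinit: free all
 */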
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
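/*
 * Illustrative note (not driver code): under QCA_LL_TX_FLOW_CONTROL_V2
 * the stubs above only create the per-pool locks and mark each pool
 * FLOW_POOL_INACTIVE; the descriptor memory itself is allocated on
 * demand by the flow-control code when a vdev's flow pool is created.
 */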
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}

	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}

	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
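/*
 * Illustrative sketch (not driver code): the intended static-pool life
 * cycle, pairing each step with its inverse.
 *
 *	dp_tx_alloc_static_pools(soc, num_pool, num_desc);	// memory
 *	dp_tx_init_static_pools(soc, num_pool, num_desc);	// sw state
 *	...
 *	dp_tx_deinit_static_pools(soc, num_pool);
 *	dp_tx_delete_static_pools(soc, num_pool);
 */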
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all TX-related descriptors:
 * 1. regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all TX-related descriptors:
 * 1. regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}
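/*
 * Illustrative sketch (not driver code): deinit and free are split so
 * software state can be torn down and rebuilt without reallocating
 * descriptor memory; a full teardown runs both, deinit first.
 *
 *	dp_soc_tx_desc_sw_pools_deinit(soc);
 *	dp_soc_tx_desc_sw_pools_free(soc);
 */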
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for the following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
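/*
 * Illustrative sketch (not driver code): how alloc and init pair up
 * during soc bring-up; if init fails, the caller still owns the
 * already-allocated memory and must free it.
 *
 *	if (dp_soc_tx_desc_sw_pools_alloc(soc))
 *		return QDF_STATUS_E_RESOURCES;
 *	if (dp_soc_tx_desc_sw_pools_init(soc)) {
 *		dp_soc_tx_desc_sw_pools_free(soc);
 *		return QDF_STATUS_E_RESOURCES;
 *	}
 */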
/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success or
 *         QDF_STATUS_E_FAILURE on failure
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}
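/*
 * Illustrative sketch (not driver code): when TSO descriptor attach is
 * deferred (wlan_cfg_is_tso_desc_attach_defer()), the TSO pools that
 * dp_soc_tx_desc_sw_pools_alloc()/_init() skipped are created later
 * through this pair:
 *
 *	dp_tso_soc_attach(txrx_soc);	// alloc + init TSO pools
 *	...
 *	dp_tso_soc_detach(txrx_soc);	// deinit + free TSO pools
 */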