/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)
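
/*
 * Illustrative note (not from the original source): these bits are
 * expected to live in a per-vdev TID-classification bitmap, so a caller
 * could skip the CCE classifier with a check along the lines of
 *
 *	if (vdev->skip_sw_tid_classification & DP_TX_SKIP_CCE_CLASSIFY)
 *		...skip CCE classification...
 *
 * The field name skip_sw_tid_classification is an assumption here.
 */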

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0xFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
	HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
	HAL_TX_ENCRYPT_TYPE_WEP_128,
	HAL_TX_ENCRYPT_TYPE_WEP_104,
	HAL_TX_ENCRYPT_TYPE_WEP_40,
	HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
	HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
	HAL_TX_ENCRYPT_TYPE_WAPI,
	HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
	HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
	HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
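
/*
 * Usage sketch (illustrative): the table is indexed by the cdp_sec_type
 * enum value, so translating a CDP security type into the HAL encrypt
 * type is a plain bounds-checked lookup:
 *
 *	if (sec_type < MAX_CDP_SEC_TYPE)
 *		encrypt_type = sec_type_map[sec_type];
 *
 * This assumes cdp_sec_type enumerators are dense and start at 0,
 * matching the ordering of the table above.
 */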

#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 *                     soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: queued packets are more than max tx, drop the frame",
			  __func__);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 *                               reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max, drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
}
#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
}
#endif //QCA_TX_LIMIT_CHECK
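
/*
 * Note on pairing (summarizing the flow later in this file): the
 * prepare paths call dp_tx_limit_check() before allocating a software
 * descriptor and dp_tx_outstanding_inc() right after a successful
 * allocation; dp_tx_desc_release() calls dp_tx_outstanding_dec() when
 * the descriptor is freed, keeping the outstanding counters balanced.
 */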

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;
		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s %d TSO num desc is NULL!",
			  __func__, __LINE__);
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc : Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    hal_tx_comp_get_buffer_source(&tx_desc->comp))
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "Tx Completion Release desc %d status %d outstanding %d",
		  tx_desc->id, comp_status,
		  qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
				(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0])) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does, so no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
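
/*
 * Caller-side note (see dp_tx_prepare_desc_single() below): callers first
 * push an alignment pad so the metadata lands on an 8-byte boundary, then
 * fold the size returned here into the descriptor's packet offset:
 *
 *	htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf, msdu_info);
 *	tx_desc->pkt_offset = align_pad + htt_hdr_size;
 */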

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *) &tso_seg->tso_flags;
	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);
	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev - pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
						% CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
					tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso number segment descriptors (typically only one) */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
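
/*
 * Flow summary (from the code above): segment descriptors are
 * pre-allocated and chained first, then qdf_nbuf_get_tso_info() fills
 * them from the nbuf; any failure along the way returns every
 * descriptor to its pool via dp_tx_free_remaining_tso_desc(), with DMA
 * unmap requested only once mapping may have happened.
 */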
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
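
/*
 * The assert above guarantees that the msdu_info meta_data[] scratch
 * area (DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit words, 4 bytes each) is
 * large enough to hold a struct htt_tx_msdu_desc_ext2_t, so the casts
 * in dp_tx_prepare_htt_metadata() above are safe.
 */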

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}
		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
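
/*
 * Design note (from the code above): the extension descriptor is staged
 * in cached_ext_desc[] on the stack and written out once to the
 * (typically uncached) descriptor memory via hal_tx_ext_desc_sync(),
 * which avoids piecemeal writes to uncached memory while the fields are
 * being filled.
 */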

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_is_tx_extended() - Check whether a frame on a WDS extended vdev
 *                       should skip the FW exception path
 *
 * @vdev: DP vdev handle
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: whether this packet needs to be exceptioned to FW or not
 *         (false: exception the packet to wlan FW, true: do not exception)
 */
static inline bool
dp_is_tx_extended(struct dp_vdev *vdev, struct cdp_tx_exception_metadata
		  *tx_exc_metadata)
{
	if (qdf_likely(!vdev->wds_ext_enabled))
		return false;

	if (tx_exc_metadata && !tx_exc_metadata->is_wds_extended)
		return false;

	return true;
}

/**
 * dp_tx_wds_ext() - Configure AST override from peer ast entry
 *
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @peer_id: peer_id of the peer for which packet is destined
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: None
 */
static inline void
dp_tx_wds_ext(struct dp_soc *soc, struct dp_vdev *vdev, uint16_t peer_id,
	      struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_peer *peer = NULL;

	msdu_info->search_type = vdev->search_type;
	msdu_info->ast_idx = vdev->bss_ast_idx;
	msdu_info->ast_hash = vdev->bss_ast_hash;

	if (qdf_likely(!vdev->wds_ext_enabled))
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_TX);

	if (qdf_unlikely(!peer))
		return;

	msdu_info->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	msdu_info->ast_idx = peer->self_ast_entry->ast_idx;
	msdu_info->ast_hash = peer->self_ast_entry->ast_hash_value;
	dp_peer_unref_delete(peer, DP_MOD_ID_TX);
	msdu_info->exception_fw = 0;
}
#else
static inline bool
dp_is_tx_extended(struct dp_vdev *vdev, struct cdp_tx_exception_metadata
		  *tx_exc_metadata)
{
	return false;
}

static inline void
dp_tx_wds_ext(struct dp_soc *soc, struct dp_vdev *vdev, uint16_t peer_id,
	      struct dp_tx_msdu_info_s *msdu_info)
{
	msdu_info->search_type = vdev->search_type;
	msdu_info->ast_idx = vdev->bss_ast_idx;
	msdu_info->ast_hash = vdev->bss_ast_hash;
}
#endif

/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info carrying metadata to the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	if (qdf_unlikely(dp_is_tx_extended(vdev, tx_exc_metadata)))
		return tx_desc;

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 * to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             | ^    in HW descriptor (aligned)
	 *  |       HTT Metadata          | |
	 *  |                             | |
	 *  |                             | | Packet Offset given in descriptor
	 *  |                             | |
	 *  |-----------------------------| |
	 *  |       Alignment Pad         | v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |         SKB Data            |           (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
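		/*
		 * Note: align_pad is the byte offset of the current data
		 * pointer from the previous 8-byte boundary; pushing that
		 * many bytes (below) moves the head down to an 8-byte
		 * aligned address, matching the layout pictured above.
		 */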
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;

		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
	}

#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}

/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id : Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
		uint8_t desc_pool_id)
{
	struct dp_tx_desc_s *tx_desc;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
	if (!tx_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = msdu_info->frm_type;
	tx_desc->tx_encap_type = vdev->tx_encap_type;
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->pkt_offset = 0;
	tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
	tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	/* Handle scattered frames - TSO/SG/ME */
	/* Allocate and prepare an extension descriptor for scattered frames */
	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
	if (!msdu_ext_desc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s Tx Extension Descriptor Alloc Fail",
			  __func__);
		goto failure;
	}

#if TQM_BYPASS_WAR
	/* Temporary WAR due to TQM VP issues */
	tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
	qdf_atomic_inc(&soc->num_tx_exception);
#endif
	if (qdf_unlikely(msdu_info->exception_fw))
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

	tx_desc->msdu_ext_desc = msdu_ext_desc;
	tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

	return tx_desc;
failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
  944. /**
  945. * dp_tx_prepare_raw() - Prepare RAW packet TX
  946. * @vdev: DP vdev handle
  947. * @nbuf: buffer pointer
  948. * @seg_info: Pointer to Segment info Descriptor to be prepared
  949. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
  950. * descriptor
  951. *
  952. * Return:
  953. */
  954. static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  955. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  956. {
  957. qdf_nbuf_t curr_nbuf = NULL;
  958. uint16_t total_len = 0;
  959. qdf_dma_addr_t paddr;
  960. int32_t i;
  961. int32_t mapped_buf_num = 0;
  962. struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
  963. qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  964. DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
  965. /* Continue only if frames are of DATA type */
  966. if (!DP_FRAME_IS_DATA(qos_wh)) {
  967. DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
  968. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  969. "Pkt. recd is of not data type");
  970. goto error;
  971. }
  972. /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
  973. if (vdev->raw_mode_war &&
  974. (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
  975. (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
  976. qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
  977. for (curr_nbuf = nbuf, i = 0; curr_nbuf;
  978. curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
  979. /*
  980. * Number of nbuf's must not exceed the size of the frags
  981. * array in seg_info.
  982. */
  983. if (i >= DP_TX_MAX_NUM_FRAGS) {
  984. dp_err_rl("nbuf cnt exceeds the max number of segs");
  985. DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
  986. goto error;
  987. }
  988. if (QDF_STATUS_SUCCESS !=
  989. qdf_nbuf_map_nbytes_single(vdev->osdev,
  990. curr_nbuf,
  991. QDF_DMA_TO_DEVICE,
  992. curr_nbuf->len)) {
  993. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  994. "%s dma map error ", __func__);
  995. DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
  996. goto error;
  997. }
  998. /* Update the count of mapped nbuf's */
  999. mapped_buf_num++;
  1000. paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
  1001. seg_info->frags[i].paddr_lo = paddr;
  1002. seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
  1003. seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
  1004. seg_info->frags[i].vaddr = (void *) curr_nbuf;
  1005. total_len += qdf_nbuf_len(curr_nbuf);
  1006. }
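/*
 * Example of the DMA address split above: a mapped address of
 * 0x123456780 is stored as paddr_lo = 0x23456780 and paddr_hi = 0x1,
 * the lo/hi format consumed by the MSDU extension descriptor.
 */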
  1007. seg_info->frag_cnt = i;
  1008. seg_info->total_len = total_len;
  1009. seg_info->next = NULL;
  1010. sg_info->curr_seg = seg_info;
  1011. msdu_info->frm_type = dp_tx_frm_raw;
  1012. msdu_info->num_seg = 1;
  1013. return nbuf;
  1014. error:
  1015. i = 0;
  1016. while (nbuf) {
  1017. curr_nbuf = nbuf;
  1018. if (i < mapped_buf_num) {
  1019. qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
  1020. QDF_DMA_TO_DEVICE,
  1021. curr_nbuf->len);
  1022. i++;
  1023. }
  1024. nbuf = qdf_nbuf_next(nbuf);
  1025. qdf_nbuf_free(curr_nbuf);
  1026. }
  1027. return NULL;
  1028. }
  1029. /**
  1030. * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
  1031. * @soc: DP soc handle
  1032. * @nbuf: Buffer pointer
  1033. *
  1034. * unmap the chain of nbufs that belong to this RAW frame.
  1035. *
  1036. * Return: None
  1037. */
  1038. static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
  1039. qdf_nbuf_t nbuf)
  1040. {
  1041. qdf_nbuf_t cur_nbuf = nbuf;
  1042. do {
  1043. qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
  1044. QDF_DMA_TO_DEVICE,
  1045. cur_nbuf->len);
  1046. cur_nbuf = qdf_nbuf_next(cur_nbuf);
  1047. } while (cur_nbuf);
  1048. }
  1049. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1050. #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
  1051. { \
  1052. qdf_nbuf_t nbuf_local; \
  1053. struct dp_vdev *vdev_local = vdev_hdl; \
  1054. do { \
  1055. if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
  1056. break; \
  1057. nbuf_local = nbuf; \
  1058. if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
  1059. htt_cmn_pkt_type_raw)) \
  1060. break; \
  1061. else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
  1062. break; \
  1063. else if (qdf_nbuf_is_tso((nbuf_local))) \
  1064. break; \
  1065. dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
  1066. (nbuf_local), \
  1067. NULL, 1, 0); \
  1068. } while (0); \
  1069. }
  1070. #else
1071. #define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf)
  1072. #endif
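/*
 * The macro above is invoked on the enqueue path, e.g. in
 * dp_tx_hw_enqueue():
 *
 *   dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
 */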
  1073. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1074. /**
  1075. * dp_tx_update_stats() - Update soc level tx stats
  1076. * @soc: DP soc handle
  1077. * @nbuf: packet being transmitted
  1078. *
  1079. * Returns: none
  1080. */
  1081. static inline void dp_tx_update_stats(struct dp_soc *soc,
  1082. qdf_nbuf_t nbuf)
  1083. {
  1084. DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
  1085. }
  1086. /**
  1087. * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
1088. * @soc: Datapath soc handle
* @vdev: Datapath vdev handle
1089. * @tx_desc: tx packet descriptor
  1090. * @tid: TID for pkt transmission
  1091. *
  1092. * Returns: 1, if coalescing is to be done
  1093. * 0, if coalescing is not to be done
  1094. */
  1095. static inline int
  1096. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1097. struct dp_tx_desc_s *tx_desc,
  1098. uint8_t tid)
  1099. {
  1100. struct dp_swlm *swlm = &soc->swlm;
  1101. union swlm_data swlm_query_data;
  1102. struct dp_swlm_tcl_data tcl_data;
  1103. QDF_STATUS status;
  1104. int ret;
  1105. if (qdf_unlikely(!swlm->is_enabled))
  1106. return 0;
  1107. tcl_data.nbuf = tx_desc->nbuf;
  1108. tcl_data.tid = tid;
  1109. tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
  1110. swlm_query_data.tcl_data = &tcl_data;
  1111. status = dp_swlm_tcl_pre_check(soc, &tcl_data);
  1112. if (QDF_IS_STATUS_ERROR(status)) {
  1113. dp_swlm_tcl_reset_session_data(soc);
  1114. DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
  1115. return 0;
  1116. }
  1117. ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
  1118. if (ret) {
  1119. DP_STATS_INC(swlm, tcl.coalesce_success, 1);
  1120. } else {
  1121. DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
  1122. }
  1123. return ret;
  1124. }
  1125. /**
  1126. * dp_tx_ring_access_end() - HAL ring access end for data transmission
  1127. * @soc: Datapath soc handle
  1128. * @hal_ring_hdl: HAL ring handle
  1129. * @coalesce: Coalesce the current write or not
  1130. *
  1131. * Returns: none
  1132. */
  1133. static inline void
  1134. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1135. int coalesce)
  1136. {
  1137. if (coalesce)
  1138. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1139. else
  1140. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1141. }
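/*
 * With coalescing, the TCL head-pointer (doorbell) update is skipped
 * here and deferred to a later enqueue, so several descriptors can be
 * flushed with a single register write.
 */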
  1142. #else
  1143. static inline void dp_tx_update_stats(struct dp_soc *soc,
  1144. qdf_nbuf_t nbuf)
  1145. {
  1146. }
  1147. static inline int
  1148. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1149. struct dp_tx_desc_s *tx_desc,
  1150. uint8_t tid)
  1151. {
  1152. return 0;
  1153. }
  1154. static inline void
  1155. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1156. int coalesce)
  1157. {
  1158. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1159. }
  1160. #endif
  1161. /**
  1162. * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
  1163. * @soc: DP Soc Handle
  1164. * @vdev: DP vdev handle
  1165. * @tx_desc: Tx Descriptor Handle
1166. * @fw_metadata: Metadata to send to Target Firmware along with frame
1167. * @tx_exc_metadata: Handle that holds exception path meta data
1168. * @msdu_info: MSDU info carrying the TID, Tx queue and segment
1169. * details for this frame
  1170. *
  1171. * Gets the next free TCL HW DMA descriptor and sets up required parameters
  1172. * from software Tx descriptor
  1173. *
  1174. * Return: QDF_STATUS_SUCCESS: success
  1175. * QDF_STATUS_E_RESOURCES: Error return
  1176. */
  1177. static QDF_STATUS
  1178. dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
  1179. struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
  1180. struct cdp_tx_exception_metadata *tx_exc_metadata,
  1181. struct dp_tx_msdu_info_s *msdu_info)
  1182. {
  1183. uint8_t type;
  1184. void *hal_tx_desc;
  1185. uint32_t *hal_tx_desc_cached;
  1186. int coalesce = 0;
  1187. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  1188. uint8_t ring_id = tx_q->ring_id & DP_TX_QUEUE_MASK;
  1189. uint8_t tid = msdu_info->tid;
1190. /*
1191. * Initialize the descriptor statically here to avoid
1192. * the function-call overhead of a qdf_mem_set()/memset
1193. */
  1194. uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
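/*
 * The TCL descriptor is composed in this cached stack buffer and
 * copied into the SRNG slot later via hal_tx_desc_sync().
 */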
  1195. enum cdp_sec_type sec_type = ((tx_exc_metadata &&
  1196. tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
  1197. tx_exc_metadata->sec_type : vdev->sec_type);
  1198. /* Return Buffer Manager ID */
  1199. uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
  1200. hal_ring_handle_t hal_ring_hdl = NULL;
  1201. QDF_STATUS status = QDF_STATUS_E_RESOURCES;
  1202. if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
  1203. dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
  1204. return QDF_STATUS_E_RESOURCES;
  1205. }
  1206. hal_tx_desc_cached = (void *) cached_desc;
  1207. if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
  1208. type = HAL_TX_BUF_TYPE_EXT_DESC;
  1209. tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;
  1210. if (tx_desc->msdu_ext_desc->flags &
  1211. DP_TX_EXT_DESC_FLAG_METADATA_VALID)
  1212. tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1213. else
  1214. tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  1215. } else {
  1216. tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
  1217. tx_desc->pkt_offset;
  1218. type = HAL_TX_BUF_TYPE_BUFFER;
  1219. tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
  1220. }
  1221. qdf_assert_always(tx_desc->dma_addr);
  1222. hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
  1223. tx_desc->dma_addr, bm_id, tx_desc->id,
  1224. type);
  1225. hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
  1226. vdev->lmac_id);
  1227. hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
  1228. msdu_info->search_type);
  1229. hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
  1230. msdu_info->ast_idx);
  1231. hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
  1232. vdev->dscp_tid_map_id);
  1233. hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
  1234. sec_type_map[sec_type]);
  1235. hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
  1236. (msdu_info->ast_hash & 0xF));
  1237. hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
  1238. hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
  1239. hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
  1240. hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
  1241. hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
  1242. vdev->hal_desc_addr_search_flags);
  1243. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
  1244. hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
1245. /* Verify checksum offload configuration */
  1246. if (vdev->csum_enabled &&
  1247. ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
  1248. || qdf_nbuf_is_tso(tx_desc->nbuf))) {
  1249. hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
  1250. hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
  1251. }
  1252. if (tid != HTT_TX_EXT_TID_INVALID)
  1253. hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);
  1254. if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
  1255. hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
  1256. if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
  1257. qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
  1258. soc->wlan_cfg_ctx)))
  1259. tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  1260. dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
  1261. tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
  1262. tx_desc->pkt_offset, tx_desc->id);
  1263. hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
  1264. if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
  1265. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1266. "%s %d : HAL RING Access Failed -- %pK",
  1267. __func__, __LINE__, hal_ring_hdl);
  1268. DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
  1269. DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
  1270. return status;
  1271. }
  1272. /* Sync cached descriptor with HW */
  1273. hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
  1274. if (qdf_unlikely(!hal_tx_desc)) {
  1275. dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
  1276. DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
  1277. DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
  1278. goto ring_access_fail;
  1279. }
  1280. tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
  1281. dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
  1282. hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
  1283. coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid);
  1284. DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
  1285. dp_tx_update_stats(soc, tx_desc->nbuf);
  1286. status = QDF_STATUS_SUCCESS;
  1287. ring_access_fail:
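/*
 * Ring the TCL doorbell only when the bus is awake; otherwise reap
 * the ring and record a flush event so the head pointer is written
 * out once the runtime-PM resume completes.
 */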
  1288. if (hif_pm_runtime_get(soc->hif_handle,
  1289. RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
  1290. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1291. hif_pm_runtime_put(soc->hif_handle,
  1292. RTPM_ID_DW_TX_HW_ENQUEUE);
  1293. } else {
  1294. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1295. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1296. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1297. }
  1298. return status;
  1299. }
  1300. /**
  1301. * dp_cce_classify() - Classify the frame based on CCE rules
  1302. * @vdev: DP vdev handle
  1303. * @nbuf: skb
  1304. *
  1305. * Classify frames based on CCE rules
1306. * Return: true if the frame is classified by CCE, else false
  1308. */
  1309. static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1310. {
  1311. qdf_ether_header_t *eh = NULL;
  1312. uint16_t ether_type;
  1313. qdf_llc_t *llcHdr;
  1314. qdf_nbuf_t nbuf_clone = NULL;
  1315. qdf_dot3_qosframe_t *qos_wh = NULL;
  1316. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1317. /*
  1318. * In case of mesh packets or hlos tid override enabled,
  1319. * don't do any classification
  1320. */
  1321. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1322. & DP_TX_SKIP_CCE_CLASSIFY))
  1323. return false;
  1324. }
  1325. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1326. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  1327. ether_type = eh->ether_type;
  1328. llcHdr = (qdf_llc_t *)(nbuf->data +
  1329. sizeof(qdf_ether_header_t));
  1330. } else {
  1331. qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1332. /* For encrypted packets don't do any classification */
  1333. if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
  1334. return false;
  1335. if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
  1336. if (qdf_unlikely(
  1337. qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
  1338. qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
  1339. ether_type = *(uint16_t *)(nbuf->data
  1340. + QDF_IEEE80211_4ADDR_HDR_LEN
  1341. + sizeof(qdf_llc_t)
  1342. - sizeof(ether_type));
  1343. llcHdr = (qdf_llc_t *)(nbuf->data +
  1344. QDF_IEEE80211_4ADDR_HDR_LEN);
  1345. } else {
  1346. ether_type = *(uint16_t *)(nbuf->data
  1347. + QDF_IEEE80211_3ADDR_HDR_LEN
  1348. + sizeof(qdf_llc_t)
  1349. - sizeof(ether_type));
  1350. llcHdr = (qdf_llc_t *)(nbuf->data +
  1351. QDF_IEEE80211_3ADDR_HDR_LEN);
  1352. }
  1353. if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
  1354. && (ether_type ==
  1355. qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
  1356. DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
  1357. return true;
  1358. }
  1359. }
  1360. return false;
  1361. }
  1362. if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
  1363. ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1364. sizeof(*llcHdr));
  1365. nbuf_clone = qdf_nbuf_clone(nbuf);
  1366. if (qdf_unlikely(nbuf_clone)) {
  1367. qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
  1368. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1369. qdf_nbuf_pull_head(nbuf_clone,
  1370. sizeof(qdf_net_vlanhdr_t));
  1371. }
  1372. }
  1373. } else {
  1374. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1375. nbuf_clone = qdf_nbuf_clone(nbuf);
  1376. if (qdf_unlikely(nbuf_clone)) {
  1377. qdf_nbuf_pull_head(nbuf_clone,
  1378. sizeof(qdf_net_vlanhdr_t));
  1379. }
  1380. }
  1381. }
  1382. if (qdf_unlikely(nbuf_clone))
  1383. nbuf = nbuf_clone;
  1384. if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
  1385. || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
  1386. || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
  1387. || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
  1388. || (qdf_nbuf_is_ipv4_pkt(nbuf)
  1389. && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
  1390. || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
  1391. qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
  1392. if (qdf_unlikely(nbuf_clone))
  1393. qdf_nbuf_free(nbuf_clone);
  1394. return true;
  1395. }
  1396. if (qdf_unlikely(nbuf_clone))
  1397. qdf_nbuf_free(nbuf_clone);
  1398. return false;
  1399. }
  1400. /**
  1401. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1402. * @vdev: DP vdev handle
1403. * @nbuf: skb
* @msdu_info: MSDU info in which the extracted TID is set
1404. *
  1405. * Extract the DSCP or PCP information from frame and map into TID value.
  1406. *
  1407. * Return: void
  1408. */
  1409. static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1410. struct dp_tx_msdu_info_s *msdu_info)
  1411. {
  1412. uint8_t tos = 0, dscp_tid_override = 0;
  1413. uint8_t *hdr_ptr, *L3datap;
  1414. uint8_t is_mcast = 0;
  1415. qdf_ether_header_t *eh = NULL;
  1416. qdf_ethervlan_header_t *evh = NULL;
  1417. uint16_t ether_type;
  1418. qdf_llc_t *llcHdr;
  1419. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  1420. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1421. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1422. eh = (qdf_ether_header_t *)nbuf->data;
  1423. hdr_ptr = (uint8_t *)(eh->ether_dhost);
  1424. L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
  1425. } else {
  1426. qdf_dot3_qosframe_t *qos_wh =
  1427. (qdf_dot3_qosframe_t *) nbuf->data;
  1428. msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
  1429. qos_wh->i_qos[0] & DP_QOS_TID : 0;
  1430. return;
  1431. }
  1432. is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
  1433. ether_type = eh->ether_type;
  1434. llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
  1435. /*
  1436. * Check if packet is dot3 or eth2 type.
  1437. */
  1438. if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
1439. ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
1440. sizeof(*llcHdr));
  1441. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1442. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
  1443. sizeof(*llcHdr);
1444. ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE
1445. + sizeof(*llcHdr) +
1446. sizeof(qdf_net_vlanhdr_t));
  1447. } else {
  1448. L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
  1449. sizeof(*llcHdr);
  1450. }
  1451. } else {
  1452. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1453. evh = (qdf_ethervlan_header_t *) eh;
  1454. ether_type = evh->ether_type;
  1455. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
  1456. }
  1457. }
  1458. /*
  1459. * Find priority from IP TOS DSCP field
  1460. */
  1461. if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
  1462. qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
  1463. if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
  1464. /* Only for unicast frames */
  1465. if (!is_mcast) {
  1466. /* send it on VO queue */
  1467. msdu_info->tid = DP_VO_TID;
  1468. }
  1469. } else {
  1470. /*
  1471. * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
  1472. * from TOS byte.
  1473. */
  1474. tos = ip->ip_tos;
  1475. dscp_tid_override = 1;
  1476. }
  1477. } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1478. /*
1479. * TODO: use flowlabel
1480. * IGMP/MLD cases to be handled in phase 2
1481. */
  1482. unsigned long ver_pri_flowlabel;
  1483. unsigned long pri;
  1484. ver_pri_flowlabel = *(unsigned long *) L3datap;
  1485. pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
  1486. DP_IPV6_PRIORITY_SHIFT;
  1487. tos = pri;
  1488. dscp_tid_override = 1;
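/*
 * Worked example (assuming IPV6_FLOWINFO_PRIORITY == 0x0ff00000 and
 * DP_IPV6_PRIORITY_SHIFT == 20): a first IPv6 word of 0x6b800000
 * (version 6, traffic class 0xb8) yields tos = 0xb8, which the DSCP
 * extraction below reduces to DSCP 46 (EF).
 */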
  1489. } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
  1490. msdu_info->tid = DP_VO_TID;
  1491. else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
  1492. /* Only for unicast frames */
  1493. if (!is_mcast) {
  1494. /* send ucast arp on VO queue */
  1495. msdu_info->tid = DP_VO_TID;
  1496. }
  1497. }
  1498. /*
  1499. * Assign all MCAST packets to BE
  1500. */
  1501. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1502. if (is_mcast) {
  1503. tos = 0;
  1504. dscp_tid_override = 1;
  1505. }
  1506. }
  1507. if (dscp_tid_override == 1) {
  1508. tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  1509. msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
  1510. }
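/*
 * Example of the mapping above (assuming DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f): a TOS byte of 0xb8 gives
 * tos = (0xb8 >> 2) & 0x3f = 46 (DSCP EF), so the TID is read from
 * dscp_tid_map[dscp_tid_map_id][46].
 */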
  1511. if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
  1512. msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
  1513. return;
  1514. }
  1515. /**
  1516. * dp_tx_classify_tid() - Obtain TID to be used for this frame
  1517. * @vdev: DP vdev handle
1518. * @nbuf: skb
* @msdu_info: MSDU info in which the classified TID is set
1519. *
  1520. * Software based TID classification is required when more than 2 DSCP-TID
  1521. * mapping tables are needed.
  1522. * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
  1523. *
  1524. * Return: void
  1525. */
  1526. static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1527. struct dp_tx_msdu_info_s *msdu_info)
  1528. {
  1529. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1530. /*
1531. * The skip_sw_tid_classification flag is set in the following cases:
  1532. * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
  1533. * 2. hlos_tid_override enabled for vdev
  1534. * 3. mesh mode enabled for vdev
  1535. */
  1536. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1537. /* Update tid in msdu_info from skb priority */
  1538. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1539. & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
  1540. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  1541. return;
  1542. }
  1543. return;
  1544. }
  1545. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1546. }
  1547. #ifdef FEATURE_WLAN_TDLS
  1548. /**
  1549. * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
  1550. * @soc: datapath SOC
  1551. * @vdev: datapath vdev
  1552. * @tx_desc: TX descriptor
  1553. *
  1554. * Return: None
  1555. */
  1556. static void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1557. struct dp_vdev *vdev,
  1558. struct dp_tx_desc_s *tx_desc)
  1559. {
  1560. if (vdev) {
  1561. if (vdev->is_tdls_frame) {
  1562. tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
  1563. vdev->is_tdls_frame = false;
  1564. }
  1565. }
  1566. }
  1567. /**
  1568. * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
  1569. * @soc: dp_soc handle
  1570. * @tx_desc: TX descriptor
  1572. *
  1573. * Return: None
  1574. */
  1575. static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
  1576. struct dp_tx_desc_s *tx_desc)
  1577. {
  1578. struct hal_tx_completion_status ts = {0};
  1579. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1580. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1581. DP_MOD_ID_TDLS);
  1582. if (qdf_unlikely(!vdev)) {
  1583. dp_err_rl("vdev is null!");
  1584. goto error;
  1585. }
  1586. hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
  1587. if (vdev->tx_non_std_data_callback.func) {
  1588. qdf_nbuf_set_next(nbuf, NULL);
  1589. vdev->tx_non_std_data_callback.func(
  1590. vdev->tx_non_std_data_callback.ctxt,
  1591. nbuf, ts.status);
  1592. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1593. return;
  1594. } else {
  1595. dp_err_rl("callback func is null");
  1596. }
  1597. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1598. error:
  1599. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
  1600. qdf_nbuf_free(nbuf);
  1601. }
  1602. /**
  1603. * dp_tx_msdu_single_map() - do nbuf map
  1604. * @vdev: DP vdev handle
  1605. * @tx_desc: DP TX descriptor pointer
  1606. * @nbuf: skb pointer
  1607. *
  1608. * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
  1609. * operation done in other component.
  1610. *
  1611. * Return: QDF_STATUS
  1612. */
  1613. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1614. struct dp_tx_desc_s *tx_desc,
  1615. qdf_nbuf_t nbuf)
  1616. {
  1617. if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
  1618. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1619. nbuf,
  1620. QDF_DMA_TO_DEVICE,
  1621. nbuf->len);
  1622. else
  1623. return qdf_nbuf_map_single(vdev->osdev, nbuf,
  1624. QDF_DMA_TO_DEVICE);
  1625. }
  1626. #else
  1627. static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1628. struct dp_vdev *vdev,
  1629. struct dp_tx_desc_s *tx_desc)
  1630. {
  1631. }
  1632. static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
  1633. struct dp_tx_desc_s *tx_desc)
  1634. {
  1635. }
  1636. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1637. struct dp_tx_desc_s *tx_desc,
  1638. qdf_nbuf_t nbuf)
  1639. {
  1640. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1641. nbuf,
  1642. QDF_DMA_TO_DEVICE,
  1643. nbuf->len);
  1644. }
  1645. #endif
  1646. #ifdef MESH_MODE_SUPPORT
  1647. /**
  1648. * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
  1649. * @soc: datapath SOC
  1650. * @vdev: datapath vdev
  1651. * @tx_desc: TX descriptor
  1652. *
  1653. * Return: None
  1654. */
  1655. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1656. struct dp_vdev *vdev,
  1657. struct dp_tx_desc_s *tx_desc)
  1658. {
  1659. if (qdf_unlikely(vdev->mesh_vdev))
  1660. tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
  1661. }
  1662. /**
  1663. * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
  1664. * @soc: dp_soc handle
  1665. * @tx_desc: TX descriptor
  1666. * @vdev: datapath vdev handle
  1667. *
  1668. * Return: None
  1669. */
  1670. static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1671. struct dp_tx_desc_s *tx_desc)
  1672. {
  1673. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1674. struct dp_vdev *vdev = NULL;
1675. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1676. qdf_nbuf_free(nbuf);
/* Take a vdev reference so the completion stat is actually updated */
vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
DP_MOD_ID_MESH);
if (vdev) {
1677. DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
}
  1678. } else {
  1679. vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1680. DP_MOD_ID_MESH);
  1681. if (vdev && vdev->osif_tx_free_ext)
  1682. vdev->osif_tx_free_ext((nbuf));
  1683. else
  1684. qdf_nbuf_free(nbuf);
  1685. if (vdev)
  1686. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  1687. }
  1688. }
  1689. #else
  1690. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1691. struct dp_vdev *vdev,
  1692. struct dp_tx_desc_s *tx_desc)
  1693. {
  1694. }
  1695. static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1696. struct dp_tx_desc_s *tx_desc)
  1697. {
  1698. }
  1699. #endif
  1700. /**
  1701. * dp_tx_frame_is_drop() - checks if the packet is loopback
  1702. * @vdev: DP vdev handle
1703. * @srcmac: source MAC address of the frame
* @dstmac: destination MAC address of the frame
  1704. *
  1705. * Return: 1 if frame needs to be dropped else 0
  1706. */
  1707. int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
  1708. {
  1709. struct dp_pdev *pdev = NULL;
  1710. struct dp_ast_entry *src_ast_entry = NULL;
  1711. struct dp_ast_entry *dst_ast_entry = NULL;
  1712. struct dp_soc *soc = NULL;
  1713. qdf_assert(vdev);
  1714. pdev = vdev->pdev;
  1715. qdf_assert(pdev);
  1716. soc = pdev->soc;
  1717. dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1718. (soc, dstmac, vdev->pdev->pdev_id);
  1719. src_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1720. (soc, srcmac, vdev->pdev->pdev_id);
  1721. if (dst_ast_entry && src_ast_entry) {
  1722. if (dst_ast_entry->peer_id ==
  1723. src_ast_entry->peer_id)
  1724. return 1;
  1725. }
  1726. return 0;
  1727. }
  1728. /**
  1729. * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
  1730. * @vdev: DP vdev handle
  1731. * @nbuf: skb
1732. * @msdu_info: MSDU info to be setup in the MSDU descriptor; carries the
1733. * TID, the metadata to the fw and the Tx queue to be used
1734. * for this Tx frame
1735. * @peer_id: peer_id of the peer in case of NAWDS frames
1736. * @tx_exc_metadata: Handle that holds exception path metadata
  1737. *
  1738. * Return: NULL on success,
  1739. * nbuf when it fails to send
  1740. */
  1741. qdf_nbuf_t
  1742. dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1743. struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
  1744. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1745. {
  1746. struct dp_pdev *pdev = vdev->pdev;
  1747. struct dp_soc *soc = pdev->soc;
  1748. struct dp_tx_desc_s *tx_desc;
  1749. QDF_STATUS status;
  1750. struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
  1751. uint16_t htt_tcl_metadata = 0;
  1752. enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
  1753. uint8_t tid = msdu_info->tid;
  1754. struct cdp_tid_tx_stats *tid_stats = NULL;
  1755. /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
  1756. tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
  1757. msdu_info, tx_exc_metadata);
  1758. if (!tx_desc) {
  1759. dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
  1760. vdev, tx_q->desc_pool_id);
  1761. drop_code = TX_DESC_ERR;
  1762. goto fail_return;
  1763. }
  1764. if (qdf_unlikely(soc->cce_disable)) {
  1765. if (dp_cce_classify(vdev, nbuf) == true) {
  1766. DP_STATS_INC(vdev, tx_i.cce_classified, 1);
  1767. tid = DP_VO_TID;
  1768. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1769. }
  1770. }
  1771. dp_tx_update_tdls_flags(soc, vdev, tx_desc);
  1772. if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
  1773. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1774. HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
  1775. } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
  1776. HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
  1777. HTT_TCL_METADATA_TYPE_PEER_BASED);
  1778. HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
  1779. peer_id);
  1780. } else
  1781. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1782. if (msdu_info->exception_fw)
  1783. HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  1784. dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
  1785. !pdev->enhanced_stats_en);
  1786. dp_tx_update_mesh_flags(soc, vdev, tx_desc);
  1787. if (qdf_unlikely(QDF_STATUS_SUCCESS !=
  1788. dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
  1789. /* Handle failure */
  1790. dp_err("qdf_nbuf_map failed");
  1791. DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
  1792. drop_code = TX_DMA_MAP_ERR;
  1793. goto release_desc;
  1794. }
  1795. /* Enqueue the Tx MSDU descriptor to HW for transmit */
  1796. status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
  1797. tx_exc_metadata, msdu_info);
  1798. if (status != QDF_STATUS_SUCCESS) {
  1799. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1800. "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
  1801. __func__, tx_desc, tx_q->ring_id);
  1802. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  1803. QDF_DMA_TO_DEVICE,
  1804. nbuf->len);
  1805. drop_code = TX_HW_ENQUEUE;
  1806. goto release_desc;
  1807. }
  1808. return NULL;
  1809. release_desc:
  1810. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  1811. fail_return:
  1812. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1813. tid_stats = &pdev->stats.tid_stats.
  1814. tid_tx_stats[tx_q->ring_id][tid];
  1815. tid_stats->swdrop_cnt[drop_code]++;
  1816. return nbuf;
  1817. }
  1818. /**
  1819. * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
  1820. * @vdev: DP vdev handle
  1821. * @nbuf: skb
  1822. * @msdu_info: MSDU info to be setup in MSDU extension descriptor
  1823. *
  1824. * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
  1825. *
  1826. * Return: NULL on success,
  1827. * nbuf when it fails to send
  1828. */
  1829. #if QDF_LOCK_STATS
  1830. noinline
1831. #endif
  1833. qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1834. struct dp_tx_msdu_info_s *msdu_info)
  1835. {
  1836. uint32_t i;
  1837. struct dp_pdev *pdev = vdev->pdev;
  1838. struct dp_soc *soc = pdev->soc;
  1839. struct dp_tx_desc_s *tx_desc;
  1840. bool is_cce_classified = false;
  1841. QDF_STATUS status;
  1842. uint16_t htt_tcl_metadata = 0;
  1843. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  1844. struct cdp_tid_tx_stats *tid_stats = NULL;
  1845. uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
  1846. if (qdf_unlikely(soc->cce_disable)) {
  1847. is_cce_classified = dp_cce_classify(vdev, nbuf);
  1848. if (is_cce_classified) {
  1849. DP_STATS_INC(vdev, tx_i.cce_classified, 1);
  1850. msdu_info->tid = DP_VO_TID;
  1851. }
  1852. }
  1853. if (msdu_info->frm_type == dp_tx_frm_me)
  1854. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  1855. i = 0;
  1857. /*
  1858. * For each segment (maps to 1 MSDU) , prepare software and hardware
  1859. * descriptors using information in msdu_info
  1860. */
  1861. while (i < msdu_info->num_seg) {
  1862. /*
  1863. * Setup Tx descriptor for an MSDU, and MSDU extension
  1864. * descriptor
  1865. */
  1866. tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
  1867. tx_q->desc_pool_id);
  1868. if (!tx_desc) {
  1869. if (msdu_info->frm_type == dp_tx_frm_me) {
  1870. prep_desc_fail++;
  1871. dp_tx_me_free_buf(pdev,
  1872. (void *)(msdu_info->u.sg_info
  1873. .curr_seg->frags[0].vaddr));
  1874. if (prep_desc_fail == msdu_info->num_seg) {
  1875. /*
  1876. * Unmap is needed only if descriptor
  1877. * preparation failed for all segments.
  1878. */
  1879. qdf_nbuf_unmap(soc->osdev,
  1880. msdu_info->u.sg_info.
  1881. curr_seg->nbuf,
  1882. QDF_DMA_TO_DEVICE);
  1883. }
  1884. /*
  1885. * Free the nbuf for the current segment
  1886. * and make it point to the next in the list.
1887. * For ME frames, there are as many segments as
1888. * there are clients.
  1889. */
  1890. qdf_nbuf_free(msdu_info->u.sg_info
  1891. .curr_seg->nbuf);
  1892. if (msdu_info->u.sg_info.curr_seg->next)
  1893. msdu_info->u.sg_info.curr_seg =
  1894. msdu_info->u.sg_info
  1895. .curr_seg->next;
  1896. i++;
  1897. continue;
  1898. }
  1899. goto done;
  1900. }
  1901. if (msdu_info->frm_type == dp_tx_frm_me) {
  1902. tx_desc->me_buffer =
  1903. msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
  1904. tx_desc->flags |= DP_TX_DESC_FLAG_ME;
  1905. }
  1906. if (is_cce_classified)
  1907. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1908. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1909. if (msdu_info->exception_fw) {
  1910. HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  1911. }
  1912. /*
  1913. * For frames with multiple segments (TSO, ME), jump to next
  1914. * segment.
  1915. */
  1916. if (msdu_info->frm_type == dp_tx_frm_tso) {
  1917. if (msdu_info->u.tso_info.curr_seg->next) {
  1918. msdu_info->u.tso_info.curr_seg =
  1919. msdu_info->u.tso_info.curr_seg->next;
  1920. /*
  1921. * If this is a jumbo nbuf, then increment the
  1922. * number of nbuf users for each additional
  1923. * segment of the msdu. This will ensure that
  1924. * the skb is freed only after receiving tx
  1925. * completion for all segments of an nbuf
  1926. */
  1927. qdf_nbuf_inc_users(nbuf);
  1928. /* Check with MCL if this is needed */
  1929. /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
  1930. */
  1931. }
  1932. }
  1933. /*
  1934. * Enqueue the Tx MSDU descriptor to HW for transmit
  1935. */
  1936. status = dp_tx_hw_enqueue(soc, vdev, tx_desc, htt_tcl_metadata,
  1937. NULL, msdu_info);
  1938. if (status != QDF_STATUS_SUCCESS) {
  1939. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1940. "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
  1941. __func__, tx_desc, tx_q->ring_id);
  1942. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1943. tid_stats = &pdev->stats.tid_stats.
  1944. tid_tx_stats[tx_q->ring_id][msdu_info->tid];
  1945. tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
  1946. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  1947. if (msdu_info->frm_type == dp_tx_frm_me) {
  1948. hw_enq_fail++;
  1949. if (hw_enq_fail == msdu_info->num_seg) {
  1950. /*
  1951. * Unmap is needed only if enqueue
  1952. * failed for all segments.
  1953. */
  1954. qdf_nbuf_unmap(soc->osdev,
  1955. msdu_info->u.sg_info.
  1956. curr_seg->nbuf,
  1957. QDF_DMA_TO_DEVICE);
  1958. }
  1959. /*
  1960. * Free the nbuf for the current segment
  1961. * and make it point to the next in the list.
1962. * For ME frames, there are as many segments as
1963. * there are clients.
  1964. */
  1965. qdf_nbuf_free(msdu_info->u.sg_info
  1966. .curr_seg->nbuf);
  1967. if (msdu_info->u.sg_info.curr_seg->next)
  1968. msdu_info->u.sg_info.curr_seg =
  1969. msdu_info->u.sg_info
  1970. .curr_seg->next;
  1971. i++;
  1972. continue;
  1973. }
  1974. /*
  1975. * For TSO frames, the nbuf users increment done for
  1976. * the current segment has to be reverted, since the
  1977. * hw enqueue for this segment failed
  1978. */
  1979. if (msdu_info->frm_type == dp_tx_frm_tso &&
  1980. msdu_info->u.tso_info.curr_seg) {
  1981. qdf_nbuf_free(nbuf);
  1982. }
  1983. goto done;
  1984. }
  1985. /*
  1986. * TODO
  1987. * if tso_info structure can be modified to have curr_seg
  1988. * as first element, following 2 blocks of code (for TSO and SG)
  1989. * can be combined into 1
  1990. */
  1991. /*
  1992. * For Multicast-Unicast converted packets,
  1993. * each converted frame (for a client) is represented as
  1994. * 1 segment
  1995. */
  1996. if ((msdu_info->frm_type == dp_tx_frm_sg) ||
  1997. (msdu_info->frm_type == dp_tx_frm_me)) {
  1998. if (msdu_info->u.sg_info.curr_seg->next) {
  1999. msdu_info->u.sg_info.curr_seg =
  2000. msdu_info->u.sg_info.curr_seg->next;
  2001. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2002. }
  2003. }
  2004. i++;
  2005. }
  2006. nbuf = NULL;
  2007. done:
  2008. return nbuf;
  2009. }
  2010. /**
2011. * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
  2012. * for SG frames
  2013. * @vdev: DP vdev handle
  2014. * @nbuf: skb
  2015. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2016. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2017. *
2018. * Return: nbuf on success,
2019. * NULL on failure
  2020. */
  2021. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2022. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2023. {
  2024. uint32_t cur_frag, nr_frags, i;
  2025. qdf_dma_addr_t paddr;
  2026. struct dp_tx_sg_info_s *sg_info;
  2027. sg_info = &msdu_info->u.sg_info;
  2028. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2029. if (QDF_STATUS_SUCCESS !=
  2030. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2031. QDF_DMA_TO_DEVICE,
  2032. qdf_nbuf_headlen(nbuf))) {
  2033. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2034. "dma map error");
  2035. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2036. qdf_nbuf_free(nbuf);
  2037. return NULL;
  2038. }
  2039. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2040. seg_info->frags[0].paddr_lo = paddr;
  2041. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2042. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2043. seg_info->frags[0].vaddr = (void *) nbuf;
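/*
 * frags[0] filled above describes the linear part of the nbuf; the
 * loop below adds one entry per page fragment (frags[1..nr_frags]).
 */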
  2044. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2045. if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
  2046. nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
  2047. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2048. "frag dma map error");
  2049. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2050. goto map_err;
  2051. }
  2052. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2053. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2054. seg_info->frags[cur_frag + 1].paddr_hi =
  2055. ((uint64_t) paddr) >> 32;
  2056. seg_info->frags[cur_frag + 1].len =
  2057. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2058. }
  2059. seg_info->frag_cnt = (cur_frag + 1);
  2060. seg_info->total_len = qdf_nbuf_len(nbuf);
  2061. seg_info->next = NULL;
  2062. sg_info->curr_seg = seg_info;
  2063. msdu_info->frm_type = dp_tx_frm_sg;
  2064. msdu_info->num_seg = 1;
  2065. return nbuf;
  2066. map_err:
  2067. /* restore paddr into nbuf before calling unmap */
  2068. qdf_nbuf_mapped_paddr_set(nbuf,
  2069. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2070. ((uint64_t)
  2071. seg_info->frags[0].paddr_hi) << 32));
  2072. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2073. QDF_DMA_TO_DEVICE,
  2074. seg_info->frags[0].len);
  2075. for (i = 1; i <= cur_frag; i++) {
  2076. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2077. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2078. seg_info->frags[i].paddr_hi) << 32),
  2079. seg_info->frags[i].len,
  2080. QDF_DMA_TO_DEVICE);
  2081. }
  2082. qdf_nbuf_free(nbuf);
  2083. return NULL;
  2084. }
  2085. /**
  2086. * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
  2087. * @vdev: DP vdev handle
  2088. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2089. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2090. *
2091. * Return: none
  2093. */
  2094. static
  2095. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2096. struct dp_tx_msdu_info_s *msdu_info,
  2097. uint16_t ppdu_cookie)
  2098. {
  2099. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2100. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2101. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2102. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2103. (msdu_info->meta_data[5], 1);
  2104. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2105. (msdu_info->meta_data[5], 1);
  2106. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2107. (msdu_info->meta_data[6], ppdu_cookie);
  2108. msdu_info->exception_fw = 1;
  2109. msdu_info->is_tx_sniffer = 1;
  2110. }
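/*
 * Typical usage, as on the exception path in dp_tx_send_exception():
 *
 *   dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
 *                                  tx_exc_metadata->ppdu_cookie);
 */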
  2111. #ifdef MESH_MODE_SUPPORT
  2112. /**
2113. * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2114. * and prepare msdu_info for mesh frames.
  2115. * @vdev: DP vdev handle
  2116. * @nbuf: skb
  2117. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2118. *
  2119. * Return: NULL on failure,
  2120. * nbuf when extracted successfully
  2121. */
  2122. static
  2123. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2124. struct dp_tx_msdu_info_s *msdu_info)
  2125. {
  2126. struct meta_hdr_s *mhdr;
  2127. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2128. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2129. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2130. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2131. msdu_info->exception_fw = 0;
  2132. goto remove_meta_hdr;
  2133. }
  2134. msdu_info->exception_fw = 1;
  2135. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2136. meta_data->host_tx_desc_pool = 1;
  2137. meta_data->update_peer_cache = 1;
  2138. meta_data->learning_frame = 1;
  2139. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2140. meta_data->power = mhdr->power;
  2141. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2142. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2143. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2144. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2145. meta_data->dyn_bw = 1;
  2146. meta_data->valid_pwr = 1;
  2147. meta_data->valid_mcs_mask = 1;
  2148. meta_data->valid_nss_mask = 1;
  2149. meta_data->valid_preamble_type = 1;
  2150. meta_data->valid_retries = 1;
  2151. meta_data->valid_bw_info = 1;
  2152. }
  2153. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2154. meta_data->encrypt_type = 0;
  2155. meta_data->valid_encrypt_type = 1;
  2156. meta_data->learning_frame = 0;
  2157. }
  2158. meta_data->valid_key_flags = 1;
  2159. meta_data->key_flags = (mhdr->keyix & 0x3);
  2160. remove_meta_hdr:
  2161. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2162. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2163. "qdf_nbuf_pull_head failed");
  2164. qdf_nbuf_free(nbuf);
  2165. return NULL;
  2166. }
  2167. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2168. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2169. "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
  2170. " tid %d to_fw %d",
  2171. __func__, msdu_info->meta_data[0],
  2172. msdu_info->meta_data[1],
  2173. msdu_info->meta_data[2],
  2174. msdu_info->meta_data[3],
  2175. msdu_info->meta_data[4],
  2176. msdu_info->meta_data[5],
  2177. msdu_info->tid, msdu_info->exception_fw);
  2178. return nbuf;
  2179. }
  2180. #else
  2181. static
  2182. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2183. struct dp_tx_msdu_info_s *msdu_info)
  2184. {
  2185. return nbuf;
  2186. }
  2187. #endif
  2188. /**
  2189. * dp_check_exc_metadata() - Checks if parameters are valid
2190. * @tx_exc: holds all exception path parameters
2191. *
2192. * Return: true when all the parameters are valid, else false
  2193. *
  2194. */
  2195. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2196. {
  2197. bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
  2198. HTT_INVALID_TID);
  2199. bool invalid_encap_type =
  2200. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2201. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2202. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2203. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2204. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2205. tx_exc->ppdu_cookie == 0);
  2206. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2207. invalid_cookie) {
  2208. return false;
  2209. }
  2210. return true;
  2211. }
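/*
 * Minimal caller-side sketch that passes these checks (illustrative
 * values only; fields as used elsewhere in this file):
 *
 *   struct cdp_tx_exception_metadata tx_exc = {0};
 *   tx_exc.peer_id = peer_id;
 *   tx_exc.tid = HTT_INVALID_TID;                     (no TID override)
 *   tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE; (use vdev encap)
 *   tx_exc.sec_type = CDP_INVALID_SEC_TYPE;           (use vdev sec type)
 *
 * With is_tx_sniffer left 0, ppdu_cookie may remain 0.
 */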
  2212. #ifdef ATH_SUPPORT_IQUE
  2213. /**
  2214. * dp_tx_mcast_enhance() - Multicast enhancement on TX
  2215. * @vdev: vdev handle
  2216. * @nbuf: skb
  2217. *
  2218. * Return: true on success,
  2219. * false on failure
  2220. */
  2221. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2222. {
  2223. qdf_ether_header_t *eh;
2224. /* Mcast to Ucast Conversion */
  2225. if (qdf_likely(!vdev->mcast_enhancement_en))
  2226. return true;
  2227. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2228. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2229. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2230. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2231. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2232. qdf_nbuf_len(nbuf));
  2233. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2234. QDF_STATUS_SUCCESS) {
  2235. return false;
  2236. }
  2237. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2238. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2239. QDF_STATUS_SUCCESS) {
  2240. return false;
  2241. }
  2242. }
  2243. }
  2244. return true;
  2245. }
  2246. #else
  2247. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2248. {
  2249. return true;
  2250. }
  2251. #endif
  2252. /**
  2253. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2254. * @nbuf: qdf_nbuf_t
  2255. * @vdev: struct dp_vdev *
  2256. *
  2257. * Allow packet for processing only if it is for peer client which is
  2258. * connected with same vap. Drop packet if client is connected to
  2259. * different vap.
  2260. *
  2261. * Return: QDF_STATUS
  2262. */
  2263. static inline QDF_STATUS
  2264. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2265. {
  2266. struct dp_ast_entry *dst_ast_entry = NULL;
  2267. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2268. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2269. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2270. return QDF_STATUS_SUCCESS;
  2271. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
  2272. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2273. eh->ether_dhost,
  2274. vdev->vdev_id);
  2275. /* If there is no ast entry, return failure */
  2276. if (qdf_unlikely(!dst_ast_entry)) {
  2277. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2278. return QDF_STATUS_E_FAILURE;
  2279. }
  2280. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2281. return QDF_STATUS_SUCCESS;
  2282. }
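/*
 * Example: a unicast frame addressed to a client associated with a
 * different VAP has no AST entry under this vdev_id, so the lookup
 * above fails and the caller drops the frame
 * (counted in tx_i.dropped.fail_per_pkt_vdev_id_check).
 */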
  2283. /**
  2284. * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  2285. * @soc: DP soc handle
  2286. * @vdev_id: id of DP vdev handle
  2287. * @nbuf: skb
  2288. * @tx_exc_metadata: Handle that holds exception path meta data
  2289. *
  2290. * Entry point for Core Tx layer (DP_TX) invoked from
  2291. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2292. *
  2293. * Return: NULL on success,
  2294. * nbuf when it fails to send
  2295. */
  2296. qdf_nbuf_t
  2297. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2298. qdf_nbuf_t nbuf,
  2299. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2300. {
  2301. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2302. qdf_ether_header_t *eh = NULL;
  2303. struct dp_tx_msdu_info_s msdu_info;
  2304. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2305. DP_MOD_ID_TX_EXCEPTION);
  2306. if (qdf_unlikely(!vdev))
  2307. goto fail;
  2308. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2309. if (!tx_exc_metadata)
  2310. goto fail;
  2311. msdu_info.tid = tx_exc_metadata->tid;
  2312. dp_tx_wds_ext(soc, vdev, tx_exc_metadata->peer_id, &msdu_info);
  2313. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2314. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2315. QDF_MAC_ADDR_REF(nbuf->data));
  2316. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2317. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2318. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2319. "Invalid parameters in exception path");
  2320. goto fail;
  2321. }
  2322. /* Basic sanity checks for unsupported packets */
  2323. /* MESH mode */
  2324. if (qdf_unlikely(vdev->mesh_vdev)) {
  2325. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2326. "Mesh mode is not supported in exception path");
  2327. goto fail;
  2328. }
  2329. /*
  2330. * Classify the frame and call corresponding
  2331. * "prepare" function which extracts the segment (TSO)
  2332. * and fragmentation information (for TSO , SG, ME, or Raw)
  2333. * into MSDU_INFO structure which is later used to fill
  2334. * SW and HW descriptors.
  2335. */
  2336. if (qdf_nbuf_is_tso(nbuf)) {
  2337. dp_verbose_debug("TSO frame %pK", vdev);
  2338. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2339. qdf_nbuf_len(nbuf));
  2340. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2341. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2342. qdf_nbuf_len(nbuf));
  2343. return nbuf;
  2344. }
  2345. goto send_multiple;
  2346. }
  2347. /* SG */
  2348. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2349. struct dp_tx_seg_info_s seg_info = {0};
  2350. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2351. if (!nbuf)
  2352. return NULL;
  2353. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2354. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2355. qdf_nbuf_len(nbuf));
  2356. goto send_multiple;
  2357. }
  2358. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  2359. return NULL;
  2360. if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
  2361. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2362. qdf_nbuf_len(nbuf));
  2363. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2364. tx_exc_metadata->ppdu_cookie);
  2365. }
  2366. /*
  2367. * Get HW Queue to use for this frame.
2368. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2369. * dedicated for data and 1 for command.
  2370. * "queue_id" maps to one hardware ring.
  2371. * With each ring, we also associate a unique Tx descriptor pool
  2372. * to minimize lock contention for these resources.
  2373. */
  2374. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2375. /*
  2376. * Check exception descriptors
  2377. */
  2378. if (dp_tx_exception_limit_check(vdev))
  2379. goto fail;
  2380. /* Single linear frame */
  2381. /*
  2382. * If nbuf is a simple linear frame, use send_single function to
  2383. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2384. * SRNG. There is no need to setup a MSDU extension descriptor.
  2385. */
  2386. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2387. tx_exc_metadata->peer_id, tx_exc_metadata);
  2388. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2389. return nbuf;
  2390. send_multiple:
  2391. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2392. fail:
  2393. if (vdev)
  2394. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2395. dp_verbose_debug("pkt send failed");
  2396. return nbuf;
  2397. }
  2398. /**
  2399. * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
  2400. * in exception path in special case to avoid regular exception path chk.
  2401. * @soc: DP soc handle
  2402. * @vdev_id: id of DP vdev handle
  2403. * @nbuf: skb
  2404. * @tx_exc_metadata: Handle that holds exception path meta data
  2405. *
  2406. * Entry point for Core Tx layer (DP_TX) invoked from
  2407. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2408. *
  2409. * Return: NULL on success,
  2410. * nbuf when it fails to send
  2411. */
  2412. qdf_nbuf_t
  2413. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2414. uint8_t vdev_id, qdf_nbuf_t nbuf,
  2415. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2416. {
  2417. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2418. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2419. DP_MOD_ID_TX_EXCEPTION);
  2420. if (qdf_unlikely(!vdev))
  2421. goto fail;
  2422. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2423. == QDF_STATUS_E_FAILURE)) {
  2424. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2425. goto fail;
  2426. }
2427. /* Drop the vdev ref; it is taken again inside dp_tx_send_exception() */
  2428. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2429. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  2430. fail:
  2431. if (vdev)
  2432. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2433. dp_verbose_debug("pkt send failed");
  2434. return nbuf;
  2435. }
  2436. /**
  2437. * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  2438. * @soc: DP soc handle
  2439. * @vdev_id: DP vdev handle
  2440. * @nbuf: skb
  2441. *
  2442. * Entry point for Core Tx layer (DP_TX) invoked from
  2443. * hard_start_xmit in OSIF/HDD
  2444. *
  2445. * Return: NULL on success,
  2446. * nbuf when it fails to send
  2447. */
  2448. #ifdef MESH_MODE_SUPPORT
  2449. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2450. qdf_nbuf_t nbuf)
  2451. {
  2452. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2453. struct meta_hdr_s *mhdr;
  2454. qdf_nbuf_t nbuf_mesh = NULL;
  2455. qdf_nbuf_t nbuf_clone = NULL;
  2456. struct dp_vdev *vdev;
  2457. uint8_t no_enc_frame = 0;
  2458. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  2459. if (!nbuf_mesh) {
  2460. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2461. "qdf_nbuf_unshare failed");
  2462. return nbuf;
  2463. }
  2464. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  2465. if (!vdev) {
  2466. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2467. "vdev is NULL for vdev_id %d", vdev_id);
  2468. return nbuf;
  2469. }
  2470. nbuf = nbuf_mesh;
  2471. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2472. if ((vdev->sec_type != cdp_sec_type_none) &&
  2473. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  2474. no_enc_frame = 1;
  2475. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  2476. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  2477. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  2478. !no_enc_frame) {
  2479. nbuf_clone = qdf_nbuf_clone(nbuf);
  2480. if (!nbuf_clone) {
  2481. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2482. "qdf_nbuf_clone failed");
  2483. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2484. return nbuf;
  2485. }
  2486. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  2487. }
  2488. if (nbuf_clone) {
  2489. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  2490. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2491. } else {
  2492. qdf_nbuf_free(nbuf_clone);
  2493. }
  2494. }
  2495. if (no_enc_frame)
  2496. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  2497. else
  2498. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  2499. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  2500. if ((!nbuf) && no_enc_frame) {
  2501. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2502. }
  2503. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2504. return nbuf;
  2505. }
  2506. #else
  2507. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
  2508. qdf_nbuf_t nbuf)
  2509. {
  2510. return dp_tx_send(soc, vdev_id, nbuf);
  2511. }
  2512. #endif
  2513. /**
  2514. * dp_tx_nawds_handler() - NAWDS handler
  2515. *
  2516. * @soc: DP soc handle
2517. * @vdev: DP vdev handle
  2518. * @msdu_info: msdu_info required to create HTT metadata
  2519. * @nbuf: skb
  2520. *
  2521. * This API transfers the multicast frames with the peer id
  2522. * on NAWDS enabled peer.
  2523. * Return: none
  2524. */
static inline
void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	uint16_t sa_peer_id = DP_INVALID_PEER;
	struct dp_ast_entry *ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid
					(soc,
					 (uint8_t *)(eh->ether_shost),
					 vdev->pdev->pdev_id);
		if (ast_entry)
			sa_peer_id = ast_entry->peer_id;
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer->bss_peer && peer->nawds_enabled) {
			peer_id = peer->peer_id;
			/* Multicast packets need to be
			 * dropped in case of intra bss forwarding
			 */
			if (sa_peer_id == peer->peer_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  " %s: multicast packet", __func__);
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("nbuf clone failed"));
				break;
			}
			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
							    msdu_info, peer_id,
							    NULL);
			if (nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  FL("pkt send failed"));
				qdf_nbuf_free(nbuf_clone);
			} else {
				if (peer_id != DP_INVALID_PEER)
					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
							 1, qdf_nbuf_len(nbuf));
			}
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
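
/*
 * Note on nbuf ownership in the loop above (editorial sketch): the
 * original nbuf stays owned by the caller, and every NAWDS peer gets
 * its own clone because dp_tx_send_msdu_single() consumes the buffer
 * it is given on success. The pattern, reduced to its essentials:
 *
 *	for each nawds peer:
 *		clone = qdf_nbuf_clone(nbuf);        // per-peer reference
 *		clone = dp_tx_send_msdu_single(...); // NULL when enqueued
 *		if (clone)
 *			qdf_nbuf_free(clone);        // enqueue failed
 */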
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
 * forwarding cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	uint16_t peer_id = HTT_INVALID_PEER;
	/*
	 * doing a memzero is causing additional function call overhead
	 * so doing static stack clearing
	 */
	struct dp_tx_msdu_info_s msdu_info = {0};
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the
	 * netdev tx lock.
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
			 QDF_MAC_ADDR_REF(nbuf->data));

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	dp_tx_wds_ext(soc, vdev, peer_id, &msdu_info);
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								    &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

	if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
		return NULL;

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);

		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);

		peer_id = DP_INVALID_PEER;
		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}
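
/*
 * Editorial sketch of the classification above, reduced to pseudo-C.
 * It is not driver code; it only summarizes which prepare path a
 * frame takes before enqueue:
 *
 *	if (TSO)            -> dp_tx_prepare_tso()  -> send_multiple
 *	else if (nonlinear) -> dp_tx_prepare_sg()   -> send_multiple
 *	else if (raw encap) -> dp_tx_prepare_raw()  -> send_multiple
 *	else                -> dp_tx_send_msdu_single()
 *
 * Everything that needs an MSDU extension descriptor (segment or
 * fragment lists) goes through send_multiple; a plain linear frame
 * takes the direct-buffer single-descriptor path.
 */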
/**
 * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
 * case to avoid a check in the per-packet path.
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit packets through dp_tx_send
 * with a special condition to avoid the per-packet check in dp_tx_send
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
		return nbuf;

	/*
	 * dp_vdev_get_ref_by_id does an atomic operation; avoid using
	 * it in the per-packet path.
	 *
	 * In this path the vdev memory is already protected by the
	 * netdev tx lock.
	 */
	vdev = soc->vdev_id_map[vdev_id];
	if (qdf_unlikely(!vdev))
		return nbuf;

	if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
			 == QDF_STATUS_E_FAILURE)) {
		DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
		return nbuf;
	}

	return dp_tx_send(soc_hdl, vdev_id, nbuf);
}
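
/*
 * Usage note (editorial, with hypothetical names): an OSIF layer would
 * register this variant instead of dp_tx_send() only when frames may
 * arrive with a stale vdev_id (for example, around interface up/down
 * races), paying one extra dp_tx_per_pkt_vdev_id_check() per packet:
 *
 *	// hypothetical registration hook, for illustration only
 *	txrx_ops.tx.tx = dp_tx_send_vdev_id_check;
 */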
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_tx_desc_s *tx_desc,
			    uint8_t *status)
{
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	qdf_assert(vdev);

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * If there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. All wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_id != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			    ((peer->bss_peer && num_peers_3addr && is_mcast) ||
			     (peer->wds_enabled &&
			      ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			       (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			    ((peer->bss_peer &&
			      !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
#endif
				peer_id = DP_INVALID_PEER;

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				}
			}
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
	}

	qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
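
/*
 * Summary of the WDS_VENDOR_EXTENSION fan-out above as a decision
 * table (editorial aid, not code):
 *
 *	frame   peer state                                 sent to peer?
 *	-----   ----------------------------------------   -------------
 *	mcast   bss_peer && some 3-addr peer exists         yes (3-addr)
 *	mcast   wds_enabled && wds_tx_mcast_4addr           yes (4-addr)
 *	ucast   wds_enabled && wds_tx_ucast_4addr           yes (4-addr)
 *	any     otherwise                                   no
 */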
/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @soc: datapath soc handle
 * @vdev: datapath vdev handle
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_tx_desc_s *tx_desc,
				  uint8_t *status)
{
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency to be captured for the buffer
 *
 * This function is used for indication whether buffer needs to be
 * sent to stack for freeing or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;
	uint32_t txcap_hdr_size = sizeof(struct tx_capture_hdr);

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		/* If mcopy is enabled and mcopy_mode is M_COPY, deliver the
		 * 1st MSDU per PPDU. If mcopy_mode is M_COPY_EXTENDED,
		 * deliver the 1st MSDU for each MPDU.
		 */
		if (pdev->mcopy_mode == M_COPY) {
			if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
			    (pdev->m_copy_id.tx_peer_id == peer_id)) {
				return QDF_STATUS_E_INVAL;
			}
		}

		if (!first_msdu)
			return QDF_STATUS_E_INVAL;

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (qdf_unlikely(qdf_nbuf_headroom(netbuf) < txcap_hdr_size)) {
		netbuf = qdf_nbuf_realloc_headroom(netbuf, txcap_hdr_size);
		if (!netbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("No headroom"));
			return QDF_STATUS_E_NOMEM;
		}
	}

	if (!qdf_nbuf_push_head(netbuf, txcap_hdr_size)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;
	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = time_latency;
	}

	return QDF_STATUS_SUCCESS;
}
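
/*
 * Illustrative sketch (not driver code): a consumer of the buffer
 * delivered through WDI_EVENT_TX_DATA can peel the pushed tx_capture
 * header back off before handing the frame to a capture tool. The
 * names sniffer_consume_tx and record_meta are hypothetical.
 *
 *	static void sniffer_consume_tx(qdf_nbuf_t netbuf)
 *	{
 *		struct tx_capture_hdr *hdr =
 *			(struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
 *
 *		record_meta(hdr->ppdu_id, hdr->peer_id, hdr->first_msdu);
 *		qdf_nbuf_pull_head(netbuf, sizeof(*hdr)); // back to payload
 *	}
 */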
/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 *
 * Return: none
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
{
}
#endif
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc)
{
	qdf_nbuf_t nbuf = desc->nbuf;

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(soc, desc);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
						desc->tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
				     QDF_DMA_TO_DEVICE, nbuf->len);

	if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
		return dp_mesh_tx_comp_free_buff(soc, desc);

	qdf_nbuf_free(nbuf);
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}

	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "netbuf %pK offset %lu", netbuf,
			  sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->band = tx_desc->pdev->operating_channel.band;
	mhdr->channel = tx_desc->pdev->operating_channel.num;
}

#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
}
#endif
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_tx_compute_tid_delay() - Compute per TID delay
 * @stats: Per TID delay stats
 * @tx_desc: Software Tx descriptor
 *
 * Compute the software enqueue and hw enqueue delays and
 * update the respective histograms
 *
 * Return: void
 */
static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
				    struct dp_tx_desc_s *tx_desc)
{
	struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);

	/*
	 * Update the Tx software enqueue delay and the HW
	 * enqueue-to-completion delay.
	 */
	dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
	dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
}
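
/*
 * Worked example (editorial): with an ingress timestamp of 100 ms, a
 * HW-enqueue timestamp of 103 ms and a completion arriving at 110 ms,
 *
 *	sw_enqueue_delay    = 103 - 100 = 3 ms   (host queuing)
 *	fwhw_transmit_delay = 110 - 103 = 7 ms   (FW/HW + air time)
 *
 * so each histogram accounts for a disjoint stage of the packet's
 * lifetime.
 */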
/*
 * dp_tx_update_peer_ext_stats() - Update the peer extended stats
 * @peer: DP peer context
 * @tx_desc: Tx software descriptor
 * @tid: Transmission ID
 * @ring_id: Rx CPU context ID/CPU_ID
 *
 * Update the peer extended stats. These are per-MSDU delay stats
 * maintained in addition to the regular delay stats.
 *
 * Return: void
 */
static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
					struct dp_tx_desc_s *tx_desc,
					uint8_t tid, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	struct cdp_peer_ext_stats *pext_stats = NULL;

	soc = pdev->soc;
	if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
		return;

	pext_stats = peer->pext_stats;

	qdf_assert(pext_stats);
	qdf_assert(ring_id < CDP_MAX_TXRX_CTX);

	/*
	 * For non-TID packets use TID 9
	 */
	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
				tx_desc);
}
#else
static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
					       struct dp_tx_desc_s *tx_desc,
					       uint8_t tid, uint8_t ring_id)
{
}
#endif
/**
 * dp_tx_compute_delay() - Compute and fill in all timestamps
 *				to pass in correct fields
 *
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
static void dp_tx_compute_delay(struct dp_vdev *vdev,
				struct dp_tx_desc_s *tx_desc,
				uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;

	if (qdf_likely(!vdev->pdev->delay_stats_flag))
		return;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
			      CDP_DELAY_STATS_SW_ENQ, ring_id);
	/*
	 * Delay between packet enqueued to HW and Tx completion
	 */
	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);

	/*
	 * Update interframe delay stats calculated at the hardstart receive
	 * point. The value of vdev->prev_tx_enq_tstamp will be 0 for the
	 * 1st frame, so the interframe delay will not be calculated
	 * correctly for the 1st frame. On the other hand, this avoids an
	 * extra per-packet check of !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}
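
/*
 * Worked example for the interframe delay (editorial): if frame N
 * entered hardstart at 100 ms and frame N+1 at 104 ms, then when
 * frame N+1 completes,
 *
 *	interframe_delay = 104 - 100 = 4 ms
 *
 * For the very first frame prev_tx_enq_tstamp is 0, so that single
 * sample is meaningless by design; the trade-off documented above is
 * to accept one bad sample rather than branch on every packet.
 */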
#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
}
#else
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
	if (subtype != QDF_PROTO_INVALID)
		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
}
#endif
/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *				per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	uint8_t mcs, pkt_type;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	soc = pdev->soc;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err("Release source is not from TQM");
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag))
		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON3));

	/*
	 * tx_failed is ideally supposed to be updated from HTT ppdu
	 * completion stats. But in IPQ807X/IPQ6018 chipsets, owing to a hw
	 * limitation, there are no completions for failed cases, so
	 * tx_failed is updated from the data path here. Please note that
	 * if tx_failed is fixed to come from ppdu stats, this has to be
	 * removed.
	 */
	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
				   peer->stats.tx.dropped.fw_rem_notx +
				   peer->stats.tx.dropped.fw_rem_tx +
				   peer->stats.tx.dropped.age_out +
				   peer->stats.tx.dropped.fw_reason1 +
				   peer->stats.tx.dropped.fw_reason2 +
				   peer->stats.tx.dropped.fw_reason3;

	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
		tid_stats->tqm_status_cnt[ts->status]++;
	}

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
		dp_update_no_ack_stats(tx_desc->nbuf, peer);
		return;
	}

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

	/*
	 * Following Rate Statistics are updated from HTT PPDU events from FW.
	 * Return from here if HTT PPDU events are enabled.
	 */
	if (!(soc->process_tx_status))
		return;

	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ts->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
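
/*
 * Note on the paired DP_STATS_INCC calls above (editorial): for every
 * pkt_type the two conditions are complementary, so exactly one MCS
 * bucket is incremented per completion, and an out-of-range MCS is
 * folded into the last bucket. For example, assuming MAX_MCS_11AC is
 * 10, a DOT11_AC completion behaves as:
 *
 *	mcs = 7   -> mcs_count[7]++            (in-range sample)
 *	mcs = 12  -> mcs_count[MAX_MCS - 1]++  (invalid, counted once)
 */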
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @netbuf: buffer
 * @status: tx status
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf,
					   uint8_t status)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;
	uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);

	qdf_assert(tx_desc);

	dp_tx_flow_pool_lock(soc, tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev) {
		dp_tx_flow_pool_unlock(soc, tx_desc);
		return;
	}

	osif_dev = vdev->osif_vdev;
	tx_compl_cbk = vdev->tx_comp;
	dp_tx_flow_pool_unlock(soc, tx_desc);

	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		flag |= BIT(QDF_TX_RX_STATUS_OK);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev, flag);
}
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
	uint64_t delta_ms;
	struct cdp_tx_sojourn_stats *sojourn_stats;

	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	sojourn_stats->cookie = (void *)peer->rdkstats_ctx;

	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		peer->avg_sojourn_msdu[tid].internal;
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
}
#endif
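
/*
 * Editorial note on the EWMA above: qdf_ewma_tx_lag_add() maintains an
 * exponentially weighted moving average of the per-MSDU sojourn time,
 * conceptually
 *
 *	avg' = avg + (sample - avg) / 2^w
 *
 * where w is the weight configured for the qdf_ewma_tx_lag type (the
 * exact precision/weight constants live in the QDF EWMA declaration,
 * not here). The raw ->internal field is exported as-is so consumers
 * of WDI_EVENT_TX_SOJOURN_STAT can decode it with the same constants.
 */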
/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
 *
 * Return: none
 */
static inline void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer)
{
	uint64_t time_latency = 0;

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
				desc->timestamp);
	}
	if (!(desc->msdu_ext_desc)) {
		if (QDF_STATUS_SUCCESS ==
		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
			return;
		}

		if (QDF_STATUS_SUCCESS ==
		    dp_get_completion_indication_for_stack(soc,
							   desc->pdev,
							   peer, ts,
							   desc->nbuf,
							   time_latency)) {
			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
						     QDF_DMA_TO_DEVICE,
						     desc->nbuf->len);
			dp_send_completion_to_stack(soc,
						    desc->pdev,
						    ts->peer_id,
						    ts->ppdu_id,
						    desc->nbuf);
			return;
		}
	}

	dp_tx_comp_free_buf(soc, desc);
}
#ifdef DISABLE_DP_STATS
/**
 * dp_tx_update_connectivity_stats() - update tx connectivity stats
 * @soc: core txrx main context
 * @vdev: datapath vdev handle
 * @tx_desc: tx desc
 * @status: tx status
 *
 * Return: none
 */
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
}
#else
static inline
void dp_tx_update_connectivity_stats(struct dp_soc *soc,
				     struct dp_vdev *vdev,
				     struct dp_tx_desc_s *tx_desc,
				     uint8_t status)
{
	void *osif_dev;
	ol_txrx_stats_rx_fp stats_cbk;
	uint8_t pkt_type;

	qdf_assert(tx_desc);

	if (!vdev ||
	    !vdev->osif_vdev ||
	    !vdev->stats_cb)
		return;

	osif_dev = vdev->osif_vdev;
	stats_cbk = vdev->stats_cb;

	stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
	if (status == HAL_TX_TQM_RR_FRAME_ACKED)
		stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
			  &pkt_type);
}
#endif
/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @soc: DP soc handle
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
static inline
void dp_tx_comp_process_tx_status(struct dp_soc *soc,
				  struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_peer *peer, uint8_t ring_id)
{
	uint32_t length;
	qdf_ether_header_t *eh;
	struct dp_vdev *vdev = NULL;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	uint8_t dp_status;

	if (!nbuf) {
		dp_info_rl("invalid tx descriptor. nbuf NULL");
		goto out;
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	length = qdf_nbuf_len(nbuf);

	dp_status = qdf_dp_get_status_from_htt(ts->status);
	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(nbuf),
				 sizeof(qdf_nbuf_data(nbuf)),
				 tx_desc->id,
				 dp_status));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "-------------------- \n"
		  "Tx Completion Stats: \n"
		  "-------------------- \n"
		  "ack_frame_rssi = %d \n"
		  "first_msdu = %d \n"
		  "last_msdu = %d \n"
		  "msdu_part_of_amsdu = %d \n"
		  "rate_stats valid = %d \n"
		  "bw = %d \n"
		  "pkt_type = %d \n"
		  "stbc = %d \n"
		  "ldpc = %d \n"
		  "sgi = %d \n"
		  "mcs = %d \n"
		  "ofdma = %d \n"
		  "tones_in_ru = %d \n"
		  "tsf = %d \n"
		  "ppdu_id = %d \n"
		  "transmit_cnt = %d \n"
		  "tid = %d \n"
		  "peer_id = %d\n",
		  ts->ack_frame_rssi, ts->first_msdu,
		  ts->last_msdu, ts->msdu_part_of_amsdu,
		  ts->valid, ts->bw, ts->pkt_type, ts->stbc,
		  ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
		  ts->tones_in_ru, ts->tsf, ts->ppdu_id,
		  ts->transmit_cnt, ts->tid, ts->peer_id);

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	if (!peer) {
		dp_err_rl("peer is null or deletion in progress");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}
	vdev = peer->vdev;

	dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);

	/* Update per-packet stats for mesh mode */
	if (qdf_unlikely(vdev->mesh_vdev) &&
	    !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

	/* Update peer level stats */
	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);

			if ((peer->vdev->tx_encap_type ==
				htt_cmn_pkt_type_ethernet) &&
			    QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
			}
		}
	} else {
		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
			if (qdf_unlikely(peer->in_twt)) {
				DP_STATS_INC_PKT(peer,
						 tx.tx_success_twt,
						 1, length);
			}
		}
	}

	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
	dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);

#ifdef QCA_SUPPORT_RDK_STATS
	if (soc->rdkstats_enabled)
		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
					    tx_desc->timestamp,
					    ts->ppdu_id);
#endif

out:
	return;
}
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process a batch of descriptors reaped by
 * dp_tx_comp_handler and release the software descriptors after
 * processing is complete
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts;
	struct dp_peer *peer = NULL;
	uint16_t peer_id = DP_INVALID_PEER;
	qdf_nbuf_t netbuf;

	desc = comp_head;

	while (desc) {
		if (peer_id != desc->peer_id) {
			if (peer)
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_TX_COMP);
			peer_id = desc->peer_id;
			peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_TX_COMP);
		}

		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			struct dp_pdev *pdev = desc->pdev;

			if (qdf_likely(peer)) {
				/*
				 * Increment peer statistics
				 * Minimal statistics update done here
				 */
				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
						 desc->length);

				if (desc->tx_status !=
						HAL_TX_TQM_RR_FRAME_ACKED)
					DP_STATS_INC(peer, tx.tx_failed, 1);
			}

			qdf_assert(pdev);
			dp_tx_outstanding_dec(pdev);

			/*
			 * Calling a QDF wrapper here would create a
			 * significant performance impact, so the wrapper
			 * call is avoided here.
			 */
			next = desc->next;
			qdf_mem_unmap_nbytes_single(soc->osdev,
						    desc->dma_addr,
						    QDF_DMA_TO_DEVICE,
						    desc->length);
			qdf_nbuf_free(desc->nbuf);
			dp_tx_desc_free(soc, desc, desc->pool_id);
			desc = next;
			continue;
		}
		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

		dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);

		netbuf = desc->nbuf;

		/* check tx complete notification */
		if (peer &&
		    QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
			dp_tx_notify_completion(soc, peer->vdev, desc,
						netbuf, ts.status);

		dp_tx_comp_process_desc(soc, desc, &ts, peer);

		next = desc->next;

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
}
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
				  uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_peer *peer;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  "Descriptor freed in vdev_detach %d",
			  tx_desc->id);
		return;
	}

	pdev = tx_desc->pdev;
	soc = pdev->soc;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  "pdev in down state %d",
			  tx_desc->id);
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		return;
	}

	qdf_assert(tx_desc->pdev);

	vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
				     DP_MOD_ID_HTT_COMP);

	if (!vdev)
		return;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;

		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.tsf = htt_desc[3];
		ts.first_msdu = 1;
		ts.last_msdu = 1;
		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
			tid_stats->htt_status_cnt[tx_status]++;
		}

		peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
					     DP_MOD_ID_HTT_COMP);

		dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		if (qdf_likely(peer))
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(soc, vdev, tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Invalid HTT tx_status %d\n",
			  __func__, tx_status);
		break;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
}
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit =
		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}
#endif
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count;
	uint32_t num_avail_for_reap = 0;
	bool force_break = false;

	DP_HIST_INIT();

more_data:
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;
	count = 0;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

	/* Find head descriptor from completion ring */
	while (qdf_likely(num_avail_for_reap)) {

		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;
		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; assert */
		if (qdf_unlikely(buffer_src !=
					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees NULL buffer_addr_info in any of
			 * ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							soc->hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
								buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
					  (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
					  DP_TX_DESC_ID_PAGE_OS,
					  (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
					  DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
						     htt_tx_status, ring_id);
		} else {
			tx_desc->peer_id =
				hal_tx_comp_get_peer_id(tx_comp_hal_desc);
			tx_desc->tx_status =
				hal_tx_comp_get_tx_status(tx_comp_hal_desc);

			/*
			 * If the fast completion mode is enabled, extended
			 * metadata from the descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
						DP_TX_DESC_FLAG_SIMPLE))
				goto add_to_pool;

			/*
			 * If the descriptor is already freed in vdev_detach,
			 * continue to next descriptor
			 */
			if (qdf_unlikely
				((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
				 !tx_desc->flags)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "Descriptor freed in vdev_detach %d",
					  tx_desc_id);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "pdev in down state %d",
					  tx_desc_id);

				dp_tx_comp_free_buf(soc, tx_desc);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}

			/* Pool id is not matching. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Tx Comp pool id %d not matched %d",
					  pool_id, tx_desc->pool_id);

				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Txdesc invalid, flgs = %x,id = %d",
					  tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * If the processed packet count exceeds the given quota,
		 * stop processing
		 */
		count++;

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	if (dp_tx_comp_enable_eol_data_check(soc)) {

		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif
static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
{
	struct wlan_cfg_dp_soc_ctxt *cfg;
	struct dp_soc *soc;

	soc = vdev->pdev->soc;
	if (!soc)
		return;

	cfg = soc->wlan_cfg_ctx;
	if (!cfg)
		return;

	if (vdev->opmode == wlan_op_mode_ndi)
		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
		 (vdev->subtype == wlan_op_subtype_p2p_go))
		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
	else
		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
}
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
        int pdev_id;

        /*
         * Fill HTT TCL Metadata with Vdev ID and MAC ID
         */
        HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
                                     HTT_TCL_METADATA_TYPE_VDEV_BASED);

        HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
                                        vdev->vdev_id);

        pdev_id =
                dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
                                                       vdev->pdev->pdev_id);
        HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

        /*
         * Set HTT Extension Valid bit to 0 by default
         */
        HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

        dp_tx_vdev_update_search_flags(vdev);
        dp_tx_vdev_update_feature_flags(vdev);

        return QDF_STATUS_SUCCESS;
}
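/*
 * Without FEATURE_WDS there is no DA-based search override, so STA
 * vdevs fall through to plain AddrY (SA-based) search below.
 */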
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
        return false;
}
#endif
/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
        struct dp_soc *soc = vdev->pdev->soc;

        /*
         * Enable both AddrY (SA based search) and AddrX (DA based search)
         * for TDLS link
         *
         * Enable AddrY (SA based search) only for non-WDS STA and
         * ProxySTA VAP (in HKv1) modes.
         *
         * In all other VAP modes, only DA based search should be
         * enabled
         */
        if (vdev->opmode == wlan_op_mode_sta &&
            vdev->tdls_link_connected)
                vdev->hal_desc_addr_search_flags =
                        (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
        else if ((vdev->opmode == wlan_op_mode_sta) &&
                 !dp_tx_da_search_override(vdev))
                vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
        else
                vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

        /* Set search type only when peer map v2 messaging is enabled
         * as we will have the search index (AST hash) only when v2 is
         * enabled
         */
        if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
                vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
        else
                vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
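/**
 * dp_is_tx_desc_flush_match() - check whether a TX desc belongs to the
 *                               pdev/vdev being flushed
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance, or NULL for a pdev-wide match
 * @tx_desc: TX desc to test
 *
 * Return: true if the descriptor is allocated and matches the given
 * vdev (or the given pdev when no vdev is supplied)
 */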
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
                          struct dp_vdev *vdev,
                          struct dp_tx_desc_s *tx_desc)
{
        if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
                return false;

        /*
         * If a vdev is given, match only on the descriptor's vdev;
         * if vdev is NULL, match on the descriptor's pdev instead.
         */
        return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
                (tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *                      to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 * true: flush the TX desc.
 * false: only reset the Vdev in each allocated TX desc
 * associated with the current Vdev.
 *
 * This function walks the TX desc pools to flush the outstanding
 * TX data or to reset the Vdev in the associated TX Desc.
 */
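/*
 * Typical call patterns (see dp_tx_vdev_detach() below and the
 * force-flush note inside this function):
 *   dp_tx_desc_flush(pdev, vdev, false);  - vdev detach: unbind descs
 *   dp_tx_desc_flush(pdev, NULL, true);   - pdev deinit: free everything
 */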
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
                      bool force_free)
{
        uint8_t i;
        uint32_t j;
        uint32_t num_desc, page_id, offset;
        uint16_t num_desc_per_page;
        struct dp_soc *soc = pdev->soc;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

        if (!vdev && !force_free) {
                dp_err("Reset TX desc vdev, Vdev param is required!");
                return;
        }

        for (i = 0; i < MAX_TXDESC_POOLS; i++) {
                tx_desc_pool = &soc->tx_desc[i];
                if (!(tx_desc_pool->pool_size) ||
                    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
                    !(tx_desc_pool->desc_pages.cacheable_pages))
                        continue;

                /*
                 * Take the flow pool lock in case the pool is freed
                 * when all tx_desc are recycled while handling TX
                 * completion. The lock is not needed for a force flush:
                 * a. it would deadlock if dp_tx_desc_release() is
                 *    also trying to acquire it.
                 * b. dp interrupts are already disabled before the
                 *    force TX desc flush in dp_pdev_deinit().
                 */
                if (!force_free)
                        qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

                num_desc = tx_desc_pool->pool_size;
                num_desc_per_page =
                        tx_desc_pool->desc_pages.num_element_per_page;
                for (j = 0; j < num_desc; j++) {
                        page_id = j / num_desc_per_page;
                        offset = j % num_desc_per_page;

                        if (qdf_unlikely(!(tx_desc_pool->
                                         desc_pages.cacheable_pages)))
                                break;

                        tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
                        if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
                                /*
                                 * Free the TX desc if force free is
                                 * required, otherwise only reset the
                                 * vdev in this TX desc.
                                 */
                                if (force_free) {
                                        dp_tx_comp_free_buf(soc, tx_desc);
                                        dp_tx_desc_release(tx_desc, i);
                                } else {
                                        tx_desc->vdev_id = DP_INVALID_VDEV_ID;
                                }
                        }
                }
                if (!force_free)
                        qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
        }
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
                      uint8_t desc_pool_id)
{
        TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

        tx_desc->vdev_id = DP_INVALID_VDEV_ID;

        TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}
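/*
 * Non-flow-control variant of dp_tx_desc_flush(): same contract as the
 * kernel-doc on the QCA_LL_TX_FLOW_CONTROL_V2 implementation above,
 * but it walks the statically configured pools and resets the vdev
 * under the per-pool descriptor lock via dp_tx_desc_reset_vdev().
 */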
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
                      bool force_free)
{
        uint8_t i, num_pool;
        uint32_t j;
        uint32_t num_desc, page_id, offset;
        uint16_t num_desc_per_page;
        struct dp_soc *soc = pdev->soc;
        struct dp_tx_desc_s *tx_desc = NULL;
        struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

        if (!vdev && !force_free) {
                dp_err("Reset TX desc vdev, Vdev param is required!");
                return;
        }

        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        for (i = 0; i < num_pool; i++) {
                tx_desc_pool = &soc->tx_desc[i];
                if (!tx_desc_pool->desc_pages.cacheable_pages)
                        continue;

                num_desc_per_page =
                        tx_desc_pool->desc_pages.num_element_per_page;
                for (j = 0; j < num_desc; j++) {
                        page_id = j / num_desc_per_page;
                        offset = j % num_desc_per_page;
                        tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

                        if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
                                if (force_free) {
                                        dp_tx_comp_free_buf(soc, tx_desc);
                                        dp_tx_desc_release(tx_desc, i);
                                } else {
                                        dp_tx_desc_reset_vdev(soc, tx_desc,
                                                              i);
                                }
                        }
                }
        }
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
        struct dp_pdev *pdev = vdev->pdev;

        /* Reset TX desc associated to this Vdev as NULL */
        dp_tx_desc_flush(pdev, vdev, false);

        dp_tx_vdev_multipass_deinit(vdev);

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_pdev_init() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
        struct dp_soc *soc = pdev->soc;

        /* Initialize Flow control counters */
        qdf_atomic_init(&pdev->num_tx_outstanding);

        if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
                /* Initialize descriptors in TCL Ring */
                hal_tx_init_data_ring(soc->hal_soc,
                                      soc->tcl_data_ring[pdev->pdev_id].hal_srng);
        }

        return QDF_STATUS_SUCCESS;
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
                                           int num_desc)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++) {
                qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
                soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
        }

        return QDF_STATUS_SUCCESS;
}
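/*
 * With flow control V2 the descriptor pools themselves are created
 * and torn down on demand by the flow-pool machinery, so the static
 * init/deinit hooks below have nothing to do.
 */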
static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
                                          int num_desc)
{
        return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++)
                qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
                                           int num_desc)
{
        uint8_t i, count;

        /* Allocate software Tx descriptor pools */
        for (i = 0; i < num_pool; i++) {
                if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("Tx Desc Pool alloc %d failed %pK"),
                                  i, soc);
                        goto fail;
                }
        }
        return QDF_STATUS_SUCCESS;

fail:
        for (count = 0; count < i; count++)
                dp_tx_desc_pool_free(soc, count);
        return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
                                          int num_desc)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++) {
                if (dp_tx_desc_pool_init(soc, i, num_desc)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("Tx Desc Pool init %d failed %pK"),
                                  i, soc);
                        return QDF_STATUS_E_NOMEM;
                }
        }
        return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++)
                dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
        uint8_t i;

        for (i = 0; i < num_pool; i++)
                dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
        dp_tx_tso_desc_pool_deinit(soc, num_pool);
        dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
        dp_tx_tso_desc_pool_free(soc, num_pool);
        dp_tx_tso_num_seg_pool_free(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
        uint8_t num_pool;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
        dp_tx_ext_desc_pool_free(soc, num_pool);
        dp_tx_delete_static_pools(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
        uint8_t num_pool;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        dp_tx_flow_control_deinit(soc);
        dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
        dp_tx_ext_desc_pool_deinit(soc, num_pool);
        dp_tx_deinit_static_pools(soc, num_pool);
}
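/*
 * Pool lifecycle, as implied by the alloc/init and deinit/free pairs
 * in this file:
 *
 *   dp_soc_tx_desc_sw_pools_alloc(soc);   - reserve pool memory
 *   dp_soc_tx_desc_sw_pools_init(soc);    - carve out descriptors
 *   ...                                   - tx traffic
 *   dp_soc_tx_desc_sw_pools_deinit(soc);  - detach descriptors
 *   dp_soc_tx_desc_sw_pools_free(soc);    - release pool memory
 */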
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
                                         uint8_t num_pool,
                                         uint16_t num_desc)
{
        if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
                dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
                return QDF_STATUS_E_FAILURE;
        }

        if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
                dp_err("TSO Num of seg Pool alloc %d failed %pK",
                       num_pool, soc);
                return QDF_STATUS_E_FAILURE;
        }

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
                                        uint8_t num_pool,
                                        uint16_t num_desc)
{
        if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
                dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
                return QDF_STATUS_E_FAILURE;
        }

        if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
                dp_err("TSO Num of seg Pool init %d failed %pK",
                       num_pool, soc);
                return QDF_STATUS_E_FAILURE;
        }

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
        uint8_t num_pool;
        uint32_t num_desc;
        uint32_t num_ext_desc;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s Tx Desc Alloc num_pool = %d, descs = %d",
                  __func__, num_pool, num_desc);

        if ((num_pool > MAX_TXDESC_POOLS) ||
            (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
                goto fail1;

        if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
                goto fail1;

        if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
                goto fail2;

        if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
                goto fail3;

        return QDF_STATUS_SUCCESS;

fail3:
        dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
        dp_tx_delete_static_pools(soc, num_pool);
fail1:
        return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
        uint8_t num_pool;
        uint32_t num_desc;
        uint32_t num_ext_desc;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

        if (dp_tx_init_static_pools(soc, num_pool, num_desc))
                goto fail1;

        if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
                goto fail2;

        if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
                goto fail3;

        dp_tx_flow_control_init(soc);
        soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
        return QDF_STATUS_SUCCESS;

fail3:
        dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
        dp_tx_deinit_static_pools(soc, num_pool);
fail1:
        return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
        struct dp_soc *soc = (struct dp_soc *)txrx_soc;
        uint8_t num_pool;
        uint32_t num_desc;
        uint32_t num_ext_desc;

        num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
        num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
        num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

        if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
                return QDF_STATUS_E_FAILURE;

        if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
                return QDF_STATUS_E_FAILURE;

        return QDF_STATUS_SUCCESS;
}

/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
        struct dp_soc *soc = (struct dp_soc *)txrx_soc;
        uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

        dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
        dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

        return QDF_STATUS_SUCCESS;
}