dp_rings_main.c
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_ipa_obj_mgmt_api.h>
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_rings.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include <wlan_utility.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "htt_stats.h"
#include "dp_htt.h"
#include "htt_ppdu_stats.h"
#include "qdf_mem.h" /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
#include <wlan_module_ids.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#include "qdf_ssr_driver_dump.h"

#ifdef WLAN_FEATURE_STATS_EXT
#define INIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc) /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif

static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index);
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index);

/* default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};

/* default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
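
/*
 * Note on dp_cpu_ring_map below (inferred, not stated in this file): each
 * row appears to correspond to one NSS offload configuration and each
 * column to one interrupt context, selecting which Rx ring a context
 * services for that configuration.
 */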
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);

/**
 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring_num
 *
 * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
{
	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint8_t status = 0;

	switch (ring_type) {
	case WBM2SW_RELEASE:
	case REO_DST:
	case RXDMA_BUF:
	case REO_EXCEPTION:
		status = ((nss_config) & (1 << ring_num));
		break;
	default:
		break;
	}

	return status;
}

#if !defined(DP_CON_MON)
void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
	}
}

qdf_export_symbol(dp_soc_reset_mon_intr_mask);

void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
 *				     rx_near_full_grp1 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: 1 if the ring_num belongs to reo_nf_grp1,
 *	   0, otherwise.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
}

/**
 * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
 *				     rx_near_full_grp2 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: 1 if the ring_num belongs to reo_nf_grp2,
 *	   0, otherwise.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
}

/**
 * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
 *				      ring type and number
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring num
 *
 * Return: near-full irq mask pointer
 */
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;
	uint8_t *nf_irq_mask = NULL;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num != wbm2_sw_rx_rel_ring_id) {
			nf_irq_mask = &soc->wlan_cfg_ctx->
					int_tx_ring_near_full_irq_mask[0];
		}
		break;
	case REO_DST:
		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
		else
			qdf_assert(0);
		break;
	default:
		break;
	}

	return nf_irq_mask;
}

/**
 * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
 * @soc: Datapath SoC handle
 * @ring_params: srng params handle
 * @msi2_addr: MSI2 addr to be set for the SRNG
 * @msi2_data: MSI2 data to be set for the SRNG
 *
 * Return: None
 */
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
	ring_params->msi2_addr = msi2_addr;
	ring_params->msi2_data = msi2_data;
}

/**
 * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring_params for SRNG
 * @ring_type: SRNG type
 * @ring_num: ring number for the SRNG
 * @nf_msi_grp_num: near full msi group number
 *
 * Return: None
 */
void dp_srng_msi2_setup(struct dp_soc *soc,
			struct hal_srng_params *ring_params,
			int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
  314. ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
  315. + msi_data_start;
  316. ring_params->flags |= HAL_SRNG_MSI_INTR;
  317. }
  318. /* Percentage of ring entries considered as nearly full */
  319. #define DP_NF_HIGH_THRESH_PERCENTAGE 75
  320. /* Percentage of ring entries considered as critically full */
  321. #define DP_NF_CRIT_THRESH_PERCENTAGE 90
  322. /* Percentage of ring entries considered as safe threshold */
  323. #define DP_NF_SAFE_THRESH_PERCENTAGE 50
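
/*
 * Worked example (illustrative): for a ring with 1024 entries the
 * percentages above yield high_thresh = 768, crit_thresh = 921 and
 * safe_thresh = 512 once dp_srng_configure_nf_interrupt_thresholds()
 * runs.
 */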
/**
 * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
 *			near full irq
 * @soc: Datapath SoC handle
 * @ring_params: ring params for SRNG
 * @ring_type: ring type
 */
void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
	if (ring_params->nf_irq_support) {
		ring_params->high_thresh = (ring_params->num_entries *
					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
		ring_params->crit_thresh = (ring_params->num_entries *
					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
		ring_params->safe_thresh = (ring_params->num_entries *
					    DP_NF_SAFE_THRESH_PERCENTAGE) / 100;
	}
}

/**
 * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
 *			structure from the ring params
 * @soc: Datapath SoC handle
 * @srng: SRNG handle
 * @ring_params: ring params for a SRNG
 *
 * Return: None
 */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	srng->crit_thresh = ring_params->crit_thresh;
	srng->safe_thresh = ring_params->safe_thresh;
}
#else
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif

/**
 * dp_get_num_msi_available()- API to get number of MSIs available
 * @soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
#else
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	int msi_data_count;
	int msi_data_start;
	int msi_irq_start;
	int ret;

	if (interrupt_mode == DP_INTR_INTEGRATED) {
		return 0;
	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
		   DP_INTR_POLL) {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret) {
			qdf_err("Unable to get DP MSI assignment %d",
				interrupt_mode);
			return -EINVAL;
		}
		return msi_data_count;
	}
	qdf_err("Interrupt mode invalid %d", interrupt_mode);
	return -EINVAL;
}
#endif

/**
 * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
 *			update threshold value from wlan_cfg_ctx
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries to fill
 *
 * Fill the ring params with the pointer update threshold
 * configuration parameters available in wlan_cfg_ctx
 *
 * Return: None
 */
static void
dp_srng_configure_pointer_update_thresholds(
				struct dp_soc *soc,
				struct hal_srng_params *ring_params,
				int ring_type, int ring_num,
				int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->pointer_timer_threshold =
			wlan_cfg_get_pointer_timer_threshold_rx(
						soc->wlan_cfg_ctx);
		ring_params->pointer_num_threshold =
			wlan_cfg_get_pointer_num_threshold_rx(
						soc->wlan_cfg_ctx);
	}
}

QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx)
{
	bool idle_check;
	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);

	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
						    ring_type, ring_num,
						    srng->num_entries);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
					    mac_id, &ring_params, idle_check,
					    idx);
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_srng_init_idx);

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_service_near_full_srngs() - Bottom half handler to process the near
 *				  full IRQ on a SRNG
 * @dp_ctx: Datapath SoC handle
 * @dp_budget: Number of SRNGs which can be processed in a single attempt
 *		without rescheduling
 * @cpu: cpu id
 *
 * Return: remaining budget/quota for the soc device
 */
static
uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;

	/*
	 * dp_service_near_full_srngs arch ops should be initialized always
	 * if the NEAR FULL IRQ feature is enabled.
	 */
	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
							dp_budget);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint32_t remaining_quota = dp_budget;
	uint8_t tx_mask = 0;
	uint8_t rx_mask = 0;
	uint8_t rx_err_mask = 0;
	uint8_t rx_wbm_rel_mask = 0;
	uint8_t reo_status_mask = 0;

	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	tx_mask = int_ctx->tx_ring_mask;
	rx_mask = int_ctx->rx_ring_mask;
	rx_err_mask = int_ctx->rx_err_ring_mask;
	rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	reo_status_mask = int_ctx->reo_status_ring_mask;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);
	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);
		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);
		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
				    soc->reo_dest_ring[ring].hal_srng,
				    ring,
				    remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	dp_umac_reset_trigger_pre_reset_notify_cb(soc);

	return dp_budget - budget;
}
#else /* QCA_HOST_MODE_WIFI_DISABLED */
uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = dp_budget;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;
	int lmac_id = 0;

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			hif_event_history_init(soc->hif_handle, i);
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
		       dp_interrupt_timer, (void *)soc,
		       QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near full IRQ
 * @soc: DP soc handle
 * @num_irq: IRQ number
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	return hif_register_ext_group(soc->hif_handle,
				      num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      &soc->intr_ctx[intr_id], "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
#endif

#ifdef DP_CON_MON_MSI_SKIP_SET
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
		  QDF_GLOBAL_MONITOR_MODE &&
		  !dp_mon_mode_local_pkt_capture(soc));
}
#else
static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
{
	return false;
}
#endif

void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	if (soc->intr_mode == DP_INTR_POLL) {
		qdf_timer_free(&soc->int_timer);
	} else {
		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
		soc->intr_ctx[i].tx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2txmon_ring_mask = 0;
		soc->intr_ctx[i].umac_reset_intr_mask = 0;

		hif_event_history_deinit(soc->hif_handle, i);
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map),
		    DP_MON_INVALID_LMAC_ID);
}

QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i = 0;
	int num_irq = 0;
	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
	int lmac_id = 0;
	int napi_scale;

	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mon_mask =
			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
		int tx_mon_ring_mask =
			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		int reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		int rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_ring_mask =
			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
		int host2rxdma_mon_ring_mask =
			wlan_cfg_get_host2rxdma_mon_ring_mask(
				soc->wlan_cfg_ctx, i);
		int rx_near_full_grp_1_mask =
			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
							     i);
		int rx_near_full_grp_2_mask =
			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
							     i);
		int tx_ring_near_full_mask =
			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
							    i);
		int host2txmon_ring_mask =
			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
		int umac_reset_intr_mask =
			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);

		if (dp_skip_rx_mon_ring_mask_set(soc))
			rx_mon_mask = 0;

		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
			host2rxdma_mon_ring_mask;
		soc->intr_ctx[i].rx_near_full_grp_1_mask =
			rx_near_full_grp_1_mask;
		soc->intr_ctx[i].rx_near_full_grp_2_mask =
			rx_near_full_grp_2_mask;
		soc->intr_ctx[i].tx_ring_near_full_mask =
			tx_ring_near_full_mask;
		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;

		soc->intr_ctx[i].soc = soc;

		num_irq = 0;

		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
					       &num_irq);

		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
		    tx_ring_near_full_mask) {
			dp_soc_near_full_interrupt_attach(soc, num_irq,
							  irq_id_map, i);
		} else {
			napi_scale = wlan_cfg_get_napi_scale_factor(
							    soc->wlan_cfg_ctx);
			if (!napi_scale)
				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;

			ret = hif_register_ext_group(soc->hif_handle,
				num_irq, irq_id_map, dp_service_srngs_wrapper,
				&soc->intr_ctx[i], "dp_intr",
				HIF_EXEC_NAPI_TYPE, napi_scale);
		}

		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
			 i, num_irq, irq_id_map[0], irq_id_map[1]);

		if (ret) {
			dp_init_err("%pK: failed, ret = %d", soc, ret);
			dp_soc_interrupt_detach(txrx_soc);
			return QDF_STATUS_E_FAILURE;
		}

		hif_event_history_init(soc->hif_handle, i);
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		if (rx_err_ring_mask)
			rx_err_ring_intr_ctxt_id = i;

		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);
	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
						  rx_err_ring_intr_ctxt_id, 0);

	return QDF_STATUS_SUCCESS;
}

#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4

void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
{
	struct qdf_mem_multi_page_t *pages;

	if (mac_id != WLAN_INVALID_PDEV_ID) {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
	} else {
		pages = &soc->link_desc_pages;
	}

	if (!pages) {
		dp_err("can not get link desc pages");
		QDF_ASSERT(0);
		return;
	}

	if (pages->dma_pages) {
		wlan_minidump_remove((void *)
				     pages->dma_pages->page_v_addr_start,
				     pages->num_pages * pages->page_size,
				     soc->ctrl_psoc,
				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				     "hw_link_desc_bank");
		dp_desc_multi_pages_mem_free(soc, QDF_DP_HW_LINK_DESC_TYPE,
					     pages, 0, false);
	}
}

qdf_export_symbol(dp_hw_link_desc_pool_banks_free);

QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	uint8_t minidump_str[MINIDUMP_STR_SIZE];
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included here because they are expected to
	 * be allocated contiguously with the REO queue descriptors.
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return QDF_STATUS_E_FAULT;
		}
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) /
			num_msdus_per_link_desc;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	/* Round up to power of 2 */
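	/* Illustrative example: num_entries = 5000 rounds *total_link_descs
	 * up to 8192.
	 */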
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);

	total_mem_size = *total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d",
		     soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, QDF_DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}

void dp_hw_link_desc_ring_free(struct dp_soc *soc)
{
	uint32_t i;
	uint32_t size = soc->wbm_idle_scatter_buf_size;
	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
	qdf_dma_addr_t paddr;

	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];

			if (vaddr) {
				qdf_mem_free_consistent(soc->osdev,
							soc->osdev->dev,
							size,
							vaddr,
							paddr,
							0);
				vaddr = NULL;
			}
		}
	} else {
		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				     soc->wbm_idle_link_ring.alloc_size,
				     soc->ctrl_psoc,
				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				     "wbm_idle_link_ring");
		dp_srng_free(soc, &soc->wbm_idle_link_ring);
	}
}

QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
{
	uint32_t entry_size, i;
	uint32_t total_mem_size;
	qdf_dma_addr_t *baseaddr = NULL;
	struct dp_srng *dp_srng;
	uint32_t ring_type;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t tlds;

	ring_type = WBM_IDLE_LINK;
	dp_srng = &soc->wbm_idle_link_ring;
	tlds = soc->total_link_descs;

	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
	total_mem_size = entry_size * tlds;
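	/*
	 * If the idle link ring fits in one allocation, create it as a
	 * regular SRNG; otherwise spread the descriptors across WBM idle
	 * list scatter buffers.
	 */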
  1046. if (total_mem_size <= max_alloc_size) {
  1047. if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
  1048. dp_init_err("%pK: Link desc idle ring setup failed",
  1049. soc);
  1050. goto fail;
  1051. }
  1052. wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
  1053. soc->wbm_idle_link_ring.alloc_size,
  1054. soc->ctrl_psoc,
  1055. WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
  1056. "wbm_idle_link_ring");
  1057. } else {
  1058. uint32_t num_scatter_bufs;
  1059. uint32_t buf_size = 0;
  1060. soc->wbm_idle_scatter_buf_size =
  1061. hal_idle_list_scatter_buf_size(soc->hal_soc);
  1062. num_scatter_bufs = hal_idle_list_num_scatter_bufs(
  1063. soc->hal_soc, total_mem_size,
  1064. soc->wbm_idle_scatter_buf_size);
  1065. if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
  1066. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1067. FL("scatter bufs size out of bounds"));
  1068. goto fail;
  1069. }
  1070. for (i = 0; i < num_scatter_bufs; i++) {
  1071. baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
  1072. buf_size = soc->wbm_idle_scatter_buf_size;
  1073. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  1074. qdf_mem_alloc_consistent(soc->osdev,
  1075. soc->osdev->dev,
  1076. buf_size,
  1077. baseaddr);
  1078. if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  1079. QDF_TRACE(QDF_MODULE_ID_DP,
  1080. QDF_TRACE_LEVEL_ERROR,
  1081. FL("Scatter lst memory alloc fail"));
  1082. goto fail;
  1083. }
  1084. }
  1085. soc->num_scatter_bufs = num_scatter_bufs;
  1086. }
  1087. return QDF_STATUS_SUCCESS;
  1088. fail:
  1089. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  1090. void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
  1091. qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
  1092. if (vaddr) {
  1093. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  1094. soc->wbm_idle_scatter_buf_size,
  1095. vaddr,
  1096. paddr, 0);
  1097. vaddr = NULL;
  1098. }
  1099. }
  1100. return QDF_STATUS_E_NOMEM;
  1101. }
  1102. qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
  1103. QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
  1104. {
  1105. struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
  1106. if (dp_srng->base_vaddr_unaligned) {
  1107. if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
  1108. return QDF_STATUS_E_FAILURE;
  1109. }
  1110. return QDF_STATUS_SUCCESS;
  1111. }
  1112. void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
  1113. {
  1114. dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
  1115. }
  1116. #ifdef IPA_OFFLOAD
  1117. #define USE_1_IPA_RX_REO_RING 1
  1118. #define USE_2_IPA_RX_REO_RINGS 2
  1119. #define REO_DST_RING_SIZE_QCA6290 1023
  1120. #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
  1121. #define REO_DST_RING_SIZE_QCA8074 1023
  1122. #define REO_DST_RING_SIZE_QCN9000 2048
  1123. #else
  1124. #define REO_DST_RING_SIZE_QCA8074 8
  1125. #define REO_DST_RING_SIZE_QCN9000 8
  1126. #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
  1127. #ifdef IPA_WDI3_TX_TWO_PIPES
  1128. #ifdef DP_MEMORY_OPT
  1129. static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
  1130. {
  1131. return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
  1132. }
  1133. static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
  1134. {
  1135. dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
  1136. }
  1137. static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
  1138. {
  1139. return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
  1140. }
  1141. static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
  1142. {
  1143. dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
  1144. }
  1145. #else /* !DP_MEMORY_OPT */
  1146. static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
  1147. {
  1148. return 0;
  1149. }
  1150. static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
  1151. {
  1152. }
  1153. static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
  1154. {
  1155. return 0;
  1156. }
  1157. static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
  1158. {
  1159. }
  1160. #endif /* DP_MEMORY_OPT */
  1161. void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
  1162. {
  1163. hal_tx_init_data_ring(soc->hal_soc,
  1164. soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
  1165. }
  1166. #else /* !IPA_WDI3_TX_TWO_PIPES */
  1167. static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
  1168. {
  1169. return 0;
  1170. }
  1171. static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
  1172. {
  1173. }
  1174. static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
  1175. {
  1176. return 0;
  1177. }
  1178. static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
  1179. {
  1180. }
  1181. void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
  1182. {
  1183. }
  1184. #endif /* IPA_WDI3_TX_TWO_PIPES */
  1185. #else
  1186. #define REO_DST_RING_SIZE_QCA6290 1024
  1187. static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
  1188. {
  1189. return 0;
  1190. }
  1191. static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
  1192. {
  1193. }
  1194. static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
  1195. {
  1196. return 0;
  1197. }
  1198. static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
  1199. {
  1200. }
  1201. void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
  1202. {
  1203. }
  1204. #endif /* IPA_OFFLOAD */
  1205. /**
  1206. * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
  1207. * @soc: Datapath soc handler
  1208. *
  1209. * This api resets the default cpu ring map
  1210. */
  1211. void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
  1212. {
  1213. uint8_t i;
  1214. int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1215. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  1216. switch (nss_config) {
  1217. case dp_nss_cfg_first_radio:
  1218. /*
  1219. * Setting Tx ring map for one nss offloaded radio
  1220. */
  1221. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
  1222. break;
  1223. case dp_nss_cfg_second_radio:
  1224. /*
1225. * Setting Tx ring map for the second NSS offloaded radio
  1226. */
  1227. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
  1228. break;
  1229. case dp_nss_cfg_dbdc:
  1230. /*
  1231. * Setting Tx ring map for 2 nss offloaded radios
  1232. */
  1233. soc->tx_ring_map[i] =
  1234. dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
  1235. break;
  1236. case dp_nss_cfg_dbtc:
  1237. /*
  1238. * Setting Tx ring map for 3 nss offloaded radios
  1239. */
  1240. soc->tx_ring_map[i] =
  1241. dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
  1242. break;
  1243. default:
  1244. dp_err("tx_ring_map failed due to invalid nss cfg");
  1245. break;
  1246. }
  1247. }
  1248. }
  1249. /**
  1250. * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
  1251. * unused WMAC hw rings
  1252. * @soc: DP Soc handle
  1253. * @mac_num: wmac num
  1254. *
  1255. * Return: Return void
  1256. */
  1257. static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
  1258. int mac_num)
  1259. {
  1260. uint8_t *grp_mask = NULL;
  1261. int group_number;
  1262. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  1263. group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
  1264. if (group_number < 0)
  1265. dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_BUF, mac_num %d",
  1266. soc, mac_num);
  1267. else
  1268. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  1269. group_number, 0x0);
  1270. grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
  1271. group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
  1272. if (group_number < 0)
  1273. dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_MONITOR_DST, mac_num %d",
  1274. soc, mac_num);
  1275. else
  1276. wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
  1277. group_number, 0x0);
  1278. grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
  1279. group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
  1280. if (group_number < 0)
  1281. dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_DST, mac_num %d",
  1282. soc, mac_num);
  1283. else
  1284. wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
  1285. group_number, 0x0);
  1286. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
  1287. group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
  1288. if (group_number < 0)
  1289. dp_init_debug("%pK: ring not part of any group; ring_type: RXDMA_MONITOR_BUF, mac_num %d",
  1290. soc, mac_num);
  1291. else
  1292. wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
  1293. group_number, 0x0);
  1294. }
  1295. #ifdef IPA_OFFLOAD
  1296. #ifdef IPA_WDI3_VLAN_SUPPORT
  1297. /**
  1298. * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
  1299. * ring for vlan tagged traffic
  1300. * @soc: DP Soc handle
  1301. *
  1302. * Return: Return void
  1303. */
  1304. void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
  1305. {
  1306. uint8_t *grp_mask = NULL;
  1307. int group_number, mask;
  1308. if (!wlan_ipa_is_vlan_enabled())
  1309. return;
  1310. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  1311. group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
  1312. if (group_number < 0) {
  1313. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  1314. soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
  1315. return;
  1316. }
  1317. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  1318. /* reset the interrupt mask for offloaded ring */
  1319. mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
  1320. /*
  1321. * set the interrupt mask to zero for rx offloaded radio.
  1322. */
  1323. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  1324. }
  1325. #else
  1326. inline
  1327. void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
  1328. { }
  1329. #endif /* IPA_WDI3_VLAN_SUPPORT */
  1330. #else
  1331. inline
  1332. void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
  1333. { }
  1334. #endif /* IPA_OFFLOAD */
  1335. /**
  1336. * dp_soc_reset_intr_mask() - reset interrupt mask
  1337. * @soc: DP Soc handle
  1338. *
  1339. * Return: Return void
  1340. */
  1341. void dp_soc_reset_intr_mask(struct dp_soc *soc)
  1342. {
  1343. uint8_t j;
  1344. uint8_t *grp_mask = NULL;
  1345. int group_number, mask, num_ring;
  1346. /* number of tx ring */
  1347. num_ring = soc->num_tcl_data_rings;
  1348. /*
  1349. * group mask for tx completion ring.
  1350. */
  1351. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  1352. /* loop and reset the mask for only offloaded ring */
  1353. for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
  1354. /*
  1355. * Group number corresponding to tx offloaded ring.
  1356. */
  1357. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  1358. if (group_number < 0) {
  1359. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  1360. soc, WBM2SW_RELEASE, j);
  1361. continue;
  1362. }
  1363. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
  1364. if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
  1365. (!mask)) {
  1366. continue;
  1367. }
  1368. /* reset the tx mask for offloaded ring */
  1369. mask &= (~(1 << j));
  1370. /*
  1371. * reset the interrupt mask for offloaded ring.
  1372. */
  1373. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  1374. }
  1375. /* number of rx rings */
  1376. num_ring = soc->num_reo_dest_rings;
  1377. /*
  1378. * group mask for reo destination ring.
  1379. */
  1380. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  1381. /* loop and reset the mask for only offloaded ring */
  1382. for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
  1383. /*
  1384. * Group number corresponding to rx offloaded ring.
  1385. */
  1386. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  1387. if (group_number < 0) {
  1388. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  1389. soc, REO_DST, j);
  1390. continue;
  1391. }
  1392. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
  1393. if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
  1394. (!mask)) {
  1395. continue;
  1396. }
  1397. /* reset the interrupt mask for offloaded ring */
  1398. mask &= (~(1 << j));
  1399. /*
  1400. * set the interrupt mask to zero for rx offloaded radio.
  1401. */
  1402. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
  1403. }
  1404. /*
  1405. * group mask for Rx buffer refill ring
  1406. */
  1407. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  1408. /* loop and reset the mask for only offloaded ring */
  1409. for (j = 0; j < MAX_PDEV_CNT; j++) {
  1410. int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  1411. if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
  1412. continue;
  1413. }
  1414. /*
  1415. * Group number corresponding to rx offloaded ring.
  1416. */
  1417. group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
  1418. if (group_number < 0) {
  1419. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
1420. soc, RXDMA_BUF, lmac_id);
  1421. continue;
  1422. }
1423. /* fetch the current interrupt mask for the offloaded ring */
  1424. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  1425. group_number);
  1426. mask &= (~(1 << lmac_id));
  1427. /*
  1428. * set the interrupt mask to zero for rx offloaded radio.
  1429. */
  1430. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  1431. group_number, mask);
  1432. }
  1433. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  1434. for (j = 0; j < num_ring; j++) {
  1435. if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
  1436. continue;
  1437. }
  1438. /*
  1439. * Group number corresponding to rx err ring.
  1440. */
  1441. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  1442. if (group_number < 0) {
  1443. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  1444. soc, REO_EXCEPTION, j);
  1445. continue;
  1446. }
  1447. wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
  1448. group_number, 0);
  1449. }
  1450. }
  1451. #ifdef IPA_OFFLOAD
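/**
* dp_reo_remap_config() - Compute REO remap register values with IPA enabled
* @soc: DP SOC handle
* @remap0: output value for the REO remap IX0 computation
* @remap1: first output value for the REO remap IX2/IX3 computation
* @remap2: second output value for the REO remap IX2/IX3 computation
*
* The REO destination ring(s) reserved for IPA are left out of the remap so
* that hashed Rx traffic is steered only to host-processed REO2SW rings.
*
* Return: true
*/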
  1452. bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
  1453. uint32_t *remap1, uint32_t *remap2)
  1454. {
  1455. uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
  1456. REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
  1457. REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
  1458. switch (soc->arch_id) {
  1459. case CDP_ARCH_TYPE_BE:
  1460. hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
  1461. soc->num_reo_dest_rings -
  1462. USE_2_IPA_RX_REO_RINGS, remap1,
  1463. remap2);
  1464. break;
  1465. case CDP_ARCH_TYPE_LI:
  1466. if (wlan_ipa_is_vlan_enabled()) {
  1467. hal_compute_reo_remap_ix2_ix3(
  1468. soc->hal_soc, ring,
  1469. soc->num_reo_dest_rings -
  1470. USE_2_IPA_RX_REO_RINGS, remap1,
  1471. remap2);
  1472. } else {
  1473. hal_compute_reo_remap_ix2_ix3(
  1474. soc->hal_soc, ring,
  1475. soc->num_reo_dest_rings -
  1476. USE_1_IPA_RX_REO_RING, remap1,
  1477. remap2);
  1478. }
  1479. hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
  1480. break;
  1481. default:
  1482. dp_err("unknown arch_id 0x%x", soc->arch_id);
  1483. QDF_BUG(0);
  1484. }
  1485. dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
  1486. return true;
  1487. }
  1488. #ifdef IPA_WDI3_TX_TWO_PIPES
  1489. static bool dp_ipa_is_alt_tx_ring(int index)
  1490. {
  1491. return index == IPA_TX_ALT_RING_IDX;
  1492. }
  1493. static bool dp_ipa_is_alt_tx_comp_ring(int index)
  1494. {
  1495. return index == IPA_TX_ALT_COMP_RING_IDX;
  1496. }
  1497. #else /* !IPA_WDI3_TX_TWO_PIPES */
  1498. static bool dp_ipa_is_alt_tx_ring(int index)
  1499. {
  1500. return false;
  1501. }
  1502. static bool dp_ipa_is_alt_tx_comp_ring(int index)
  1503. {
  1504. return false;
  1505. }
  1506. #endif /* IPA_WDI3_TX_TWO_PIPES */
  1507. /**
  1508. * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
  1509. *
  1510. * @tx_ring_num: Tx ring number
  1511. * @tx_ipa_ring_sz: Return param only updated for IPA.
  1512. * @soc_cfg_ctx: dp soc cfg context
  1513. *
  1514. * Return: None
  1515. */
  1516. static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
  1517. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
  1518. {
  1519. if (!soc_cfg_ctx->ipa_enabled)
  1520. return;
  1521. if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
  1522. *tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
  1523. else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
  1524. *tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
  1525. }
  1526. /**
  1527. * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
  1528. *
  1529. * @tx_comp_ring_num: Tx comp ring number
  1530. * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
  1531. * @soc_cfg_ctx: dp soc cfg context
  1532. *
  1533. * Return: None
  1534. */
  1535. static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
  1536. int *tx_comp_ipa_ring_sz,
  1537. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
  1538. {
  1539. if (!soc_cfg_ctx->ipa_enabled)
  1540. return;
  1541. if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
  1542. *tx_comp_ipa_ring_sz =
  1543. wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
  1544. else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
  1545. *tx_comp_ipa_ring_sz =
  1546. wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
  1547. }
  1548. #else
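/**
* dp_reo_ring_selection() - Select REO2SW destination rings from a bitmap
* @value: bitmap of REO2SW rings to use; bit i enables REO_REMAP_SW(i+1)
* @ring: output array filled with the selected REO_REMAP_SW* values
*
* For example, a @value of 0xD selects {REO_REMAP_SW1, REO_REMAP_SW3,
* REO_REMAP_SW4} and returns 3.
*
* Return: number of rings selected
*/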
  1549. static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
  1550. {
  1551. uint8_t num = 0;
  1552. switch (value) {
  1553. /* should we have all the different possible ring configs */
  1554. case 0xFF:
  1555. num = 8;
  1556. ring[0] = REO_REMAP_SW1;
  1557. ring[1] = REO_REMAP_SW2;
  1558. ring[2] = REO_REMAP_SW3;
  1559. ring[3] = REO_REMAP_SW4;
  1560. ring[4] = REO_REMAP_SW5;
  1561. ring[5] = REO_REMAP_SW6;
  1562. ring[6] = REO_REMAP_SW7;
  1563. ring[7] = REO_REMAP_SW8;
  1564. break;
  1565. case 0x3F:
  1566. num = 6;
  1567. ring[0] = REO_REMAP_SW1;
  1568. ring[1] = REO_REMAP_SW2;
  1569. ring[2] = REO_REMAP_SW3;
  1570. ring[3] = REO_REMAP_SW4;
  1571. ring[4] = REO_REMAP_SW5;
  1572. ring[5] = REO_REMAP_SW6;
  1573. break;
  1574. case 0xF:
  1575. num = 4;
  1576. ring[0] = REO_REMAP_SW1;
  1577. ring[1] = REO_REMAP_SW2;
  1578. ring[2] = REO_REMAP_SW3;
  1579. ring[3] = REO_REMAP_SW4;
  1580. break;
  1581. case 0xE:
  1582. num = 3;
  1583. ring[0] = REO_REMAP_SW2;
  1584. ring[1] = REO_REMAP_SW3;
  1585. ring[2] = REO_REMAP_SW4;
  1586. break;
  1587. case 0xD:
  1588. num = 3;
  1589. ring[0] = REO_REMAP_SW1;
  1590. ring[1] = REO_REMAP_SW3;
  1591. ring[2] = REO_REMAP_SW4;
  1592. break;
  1593. case 0xC:
  1594. num = 2;
  1595. ring[0] = REO_REMAP_SW3;
  1596. ring[1] = REO_REMAP_SW4;
  1597. break;
  1598. case 0xB:
  1599. num = 3;
  1600. ring[0] = REO_REMAP_SW1;
  1601. ring[1] = REO_REMAP_SW2;
  1602. ring[2] = REO_REMAP_SW4;
  1603. break;
  1604. case 0xA:
  1605. num = 2;
  1606. ring[0] = REO_REMAP_SW2;
  1607. ring[1] = REO_REMAP_SW4;
  1608. break;
  1609. case 0x9:
  1610. num = 2;
  1611. ring[0] = REO_REMAP_SW1;
  1612. ring[1] = REO_REMAP_SW4;
  1613. break;
  1614. case 0x8:
  1615. num = 1;
  1616. ring[0] = REO_REMAP_SW4;
  1617. break;
  1618. case 0x7:
  1619. num = 3;
  1620. ring[0] = REO_REMAP_SW1;
  1621. ring[1] = REO_REMAP_SW2;
  1622. ring[2] = REO_REMAP_SW3;
  1623. break;
  1624. case 0x6:
  1625. num = 2;
  1626. ring[0] = REO_REMAP_SW2;
  1627. ring[1] = REO_REMAP_SW3;
  1628. break;
  1629. case 0x5:
  1630. num = 2;
  1631. ring[0] = REO_REMAP_SW1;
  1632. ring[1] = REO_REMAP_SW3;
  1633. break;
  1634. case 0x4:
  1635. num = 1;
  1636. ring[0] = REO_REMAP_SW3;
  1637. break;
  1638. case 0x3:
  1639. num = 2;
  1640. ring[0] = REO_REMAP_SW1;
  1641. ring[1] = REO_REMAP_SW2;
  1642. break;
  1643. case 0x2:
  1644. num = 1;
  1645. ring[0] = REO_REMAP_SW2;
  1646. break;
  1647. case 0x1:
  1648. num = 1;
  1649. ring[0] = REO_REMAP_SW1;
  1650. break;
  1651. default:
  1652. dp_err("unknown reo ring map 0x%x", value);
  1653. QDF_BUG(0);
  1654. }
  1655. return num;
  1656. }
  1657. bool dp_reo_remap_config(struct dp_soc *soc,
  1658. uint32_t *remap0,
  1659. uint32_t *remap1,
  1660. uint32_t *remap2)
  1661. {
  1662. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1663. uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
  1664. uint8_t num;
  1665. uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
  1666. uint32_t value;
  1667. switch (offload_radio) {
  1668. case dp_nss_cfg_default:
  1669. value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
  1670. num = dp_reo_ring_selection(value, ring);
  1671. hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
  1672. num, remap1, remap2);
  1673. hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
  1674. break;
  1675. case dp_nss_cfg_first_radio:
  1676. value = reo_config & 0xE;
  1677. num = dp_reo_ring_selection(value, ring);
  1678. hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
  1679. num, remap1, remap2);
  1680. break;
  1681. case dp_nss_cfg_second_radio:
  1682. value = reo_config & 0xD;
  1683. num = dp_reo_ring_selection(value, ring);
  1684. hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
  1685. num, remap1, remap2);
  1686. break;
  1687. case dp_nss_cfg_dbdc:
  1688. case dp_nss_cfg_dbtc:
  1689. /* return false if both or all are offloaded to NSS */
  1690. return false;
  1691. }
  1692. dp_debug("remap1 %x remap2 %x offload_radio %u",
  1693. *remap1, *remap2, offload_radio);
  1694. return true;
  1695. }
  1696. static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
  1697. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
  1698. {
  1699. }
  1700. static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
  1701. int *tx_comp_ipa_ring_sz,
  1702. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
  1703. {
  1704. }
  1705. #endif /* IPA_OFFLOAD */
  1706. /**
  1707. * dp_reo_frag_dst_set() - configure reo register to set the
  1708. * fragment destination ring
  1709. * @soc: Datapath soc
  1710. * @frag_dst_ring: output parameter to set fragment destination ring
  1711. *
1712. * Based on offload_radio below, the fragment destination ring is selected
  1713. * 0 - TCL
  1714. * 1 - SW1
  1715. * 2 - SW2
  1716. * 3 - SW3
  1717. * 4 - SW4
  1718. * 5 - Release
  1719. * 6 - FW
  1720. * 7 - alternate select
  1721. *
  1722. * Return: void
  1723. */
  1724. void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
  1725. {
  1726. uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
  1727. switch (offload_radio) {
  1728. case dp_nss_cfg_default:
  1729. *frag_dst_ring = REO_REMAP_TCL;
  1730. break;
  1731. case dp_nss_cfg_first_radio:
  1732. /*
  1733. * This configuration is valid for single band radio which
  1734. * is also NSS offload.
  1735. */
  1736. case dp_nss_cfg_dbdc:
  1737. case dp_nss_cfg_dbtc:
  1738. *frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
  1739. break;
  1740. default:
  1741. dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
  1742. break;
  1743. }
  1744. }
  1745. #ifdef WLAN_FEATURE_STATS_EXT
  1746. static inline void dp_create_ext_stats_event(struct dp_soc *soc)
  1747. {
  1748. qdf_event_create(&soc->rx_hw_stats_event);
  1749. }
  1750. #else
  1751. static inline void dp_create_ext_stats_event(struct dp_soc *soc)
  1752. {
  1753. }
  1754. #endif
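/**
* dp_deinit_tx_pair_by_index() - De-initialize the TCL data/WBM completion
* ring pair for the given index
* @soc: DP SOC pointer
* @index: index of soc->tcl_data_ring and soc->tx_comp_ring to de-initialize
*
* Return: void
*/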
  1755. static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
  1756. {
  1757. int tcl_ring_num, wbm_ring_num;
  1758. wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
  1759. index,
  1760. &tcl_ring_num,
  1761. &wbm_ring_num);
  1762. if (tcl_ring_num == -1) {
  1763. dp_err("incorrect tcl ring num for index %u", index);
  1764. return;
  1765. }
  1766. dp_ssr_dump_srng_unregister("tcl_data_ring", index);
  1767. dp_ssr_dump_srng_unregister("tx_comp_ring", index);
  1768. wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
  1769. soc->tcl_data_ring[index].alloc_size,
  1770. soc->ctrl_psoc,
  1771. WLAN_MD_DP_SRNG_TCL_DATA,
  1772. "tcl_data_ring");
  1773. dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
  1774. dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
  1775. tcl_ring_num);
  1776. if (wbm_ring_num == INVALID_WBM_RING_NUM)
  1777. return;
  1778. wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
  1779. soc->tx_comp_ring[index].alloc_size,
  1780. soc->ctrl_psoc,
  1781. WLAN_MD_DP_SRNG_TX_COMP,
  1782. "tcl_comp_ring");
  1783. dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
  1784. wbm_ring_num);
  1785. }
  1786. /**
  1787. * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
  1788. * ring pair
  1789. * @soc: DP soc pointer
  1790. * @index: index of soc->tcl_data or soc->tx_comp to initialize
  1791. *
  1792. * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
  1793. */
  1794. static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
  1795. uint8_t index)
  1796. {
  1797. int tcl_ring_num, wbm_ring_num;
  1798. uint8_t bm_id;
  1799. if (index >= MAX_TCL_DATA_RINGS) {
  1800. dp_err("unexpected index!");
  1801. QDF_BUG(0);
  1802. goto fail1;
  1803. }
  1804. wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
  1805. index,
  1806. &tcl_ring_num,
  1807. &wbm_ring_num);
  1808. if (tcl_ring_num == -1) {
  1809. dp_err("incorrect tcl ring num for index %u", index);
  1810. goto fail1;
  1811. }
  1812. dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
  1813. if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
  1814. tcl_ring_num, 0)) {
  1815. dp_err("dp_srng_init failed for tcl_data_ring");
  1816. goto fail1;
  1817. }
  1818. wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
  1819. soc->tcl_data_ring[index].alloc_size,
  1820. soc->ctrl_psoc,
  1821. WLAN_MD_DP_SRNG_TCL_DATA,
  1822. "tcl_data_ring");
  1823. if (wbm_ring_num == INVALID_WBM_RING_NUM)
  1824. goto set_rbm;
  1825. if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
  1826. wbm_ring_num, 0)) {
  1827. dp_err("dp_srng_init failed for tx_comp_ring");
  1828. goto fail1;
  1829. }
  1830. dp_ssr_dump_srng_register("tcl_data_ring",
  1831. &soc->tcl_data_ring[index], index);
  1832. dp_ssr_dump_srng_register("tx_comp_ring",
  1833. &soc->tx_comp_ring[index], index);
  1834. wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
  1835. soc->tx_comp_ring[index].alloc_size,
  1836. soc->ctrl_psoc,
  1837. WLAN_MD_DP_SRNG_TX_COMP,
  1838. "tcl_comp_ring");
  1839. set_rbm:
  1840. bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
  1841. soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
  1842. return QDF_STATUS_SUCCESS;
  1843. fail1:
  1844. return QDF_STATUS_E_FAILURE;
  1845. }
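/**
* dp_free_tx_ring_pair_by_index() - Free the TCL data/WBM completion ring
* pair for the given index
* @soc: DP SOC pointer
* @index: index of soc->tcl_data_ring and soc->tx_comp_ring to free
*
* Return: void
*/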
  1846. static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
  1847. {
  1848. dp_debug("index %u", index);
  1849. dp_srng_free(soc, &soc->tcl_data_ring[index]);
  1850. dp_srng_free(soc, &soc->tx_comp_ring[index]);
  1851. }
  1852. /**
  1853. * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
  1854. * ring pair for the given "index"
  1855. * @soc: DP soc pointer
  1856. * @index: index of soc->tcl_data or soc->tx_comp to initialize
  1857. *
  1858. * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
  1859. */
  1860. static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
  1861. uint8_t index)
  1862. {
  1863. int tx_ring_size;
  1864. int tx_comp_ring_size;
  1865. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
  1866. int cached = 0;
  1867. if (index >= MAX_TCL_DATA_RINGS) {
  1868. dp_err("unexpected index!");
  1869. QDF_BUG(0);
  1870. goto fail1;
  1871. }
  1872. dp_debug("index %u", index);
  1873. tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
  1874. dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
  1875. if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
  1876. tx_ring_size, cached)) {
  1877. dp_err("dp_srng_alloc failed for tcl_data_ring");
  1878. goto fail1;
  1879. }
  1880. tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
  1881. dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
  1882. /* Enable cached TCL desc if NSS offload is disabled */
  1883. if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
  1884. cached = WLAN_CFG_DST_RING_CACHED_DESC;
  1885. if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
  1886. INVALID_WBM_RING_NUM)
  1887. return QDF_STATUS_SUCCESS;
  1888. if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
  1889. tx_comp_ring_size, cached)) {
  1890. dp_err("dp_srng_alloc failed for tx_comp_ring");
  1891. goto fail1;
  1892. }
  1893. return QDF_STATUS_SUCCESS;
  1894. fail1:
  1895. return QDF_STATUS_E_FAILURE;
  1896. }
  1897. /**
  1898. * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
  1899. * @pdev: DP_PDEV handle
  1900. *
  1901. * Return: void
  1902. */
  1903. void
  1904. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  1905. {
  1906. uint8_t map_id;
  1907. struct dp_soc *soc = pdev->soc;
  1908. if (!soc)
  1909. return;
  1910. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  1911. qdf_mem_copy(pdev->dscp_tid_map[map_id],
  1912. default_dscp_tid_map,
  1913. sizeof(default_dscp_tid_map));
  1914. }
  1915. for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
  1916. hal_tx_set_dscp_tid_map(soc->hal_soc,
  1917. default_dscp_tid_map,
  1918. map_id);
  1919. }
  1920. }
  1921. /**
  1922. * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
  1923. * @pdev: DP_PDEV handle
  1924. *
  1925. * Return: void
  1926. */
  1927. void
  1928. dp_pcp_tid_map_setup(struct dp_pdev *pdev)
  1929. {
  1930. struct dp_soc *soc = pdev->soc;
  1931. if (!soc)
  1932. return;
  1933. qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
  1934. sizeof(default_pcp_tid_map));
  1935. hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
  1936. }
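/**
* dp_reo_desc_freelist_destroy() - Drain and destroy the REO desc freelist
* @soc: Datapath SOC handle
*
* Unmaps and frees the HW queue descriptor memory of every node left on the
* freelist, then destroys the list and its lock.
*
* Return: void
*/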
  1937. #ifndef DP_UMAC_HW_RESET_SUPPORT
  1938. static inline
  1939. #endif
  1940. void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  1941. {
  1942. struct reo_desc_list_node *desc;
  1943. struct dp_rx_tid *rx_tid;
  1944. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  1945. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  1946. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  1947. rx_tid = &desc->rx_tid;
  1948. qdf_mem_unmap_nbytes_single(soc->osdev,
  1949. rx_tid->hw_qdesc_paddr,
  1950. QDF_DMA_BIDIRECTIONAL,
  1951. rx_tid->hw_qdesc_alloc_size);
  1952. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1953. qdf_mem_free(desc);
  1954. }
  1955. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  1956. qdf_list_destroy(&soc->reo_desc_freelist);
  1957. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  1958. }
  1959. #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
  1960. /**
  1961. * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
  1962. * for deferred reo desc list
  1963. * @soc: Datapath soc handle
  1964. *
  1965. * Return: void
  1966. */
  1967. static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
  1968. {
  1969. qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
  1970. qdf_list_create(&soc->reo_desc_deferred_freelist,
  1971. REO_DESC_DEFERRED_FREELIST_SIZE);
  1972. soc->reo_desc_deferred_freelist_init = true;
  1973. }
  1974. /**
  1975. * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
  1976. * free the leftover REO QDESCs
  1977. * @soc: Datapath soc handle
  1978. *
  1979. * Return: void
  1980. */
  1981. static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
  1982. {
  1983. struct reo_desc_deferred_freelist_node *desc;
  1984. qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
  1985. soc->reo_desc_deferred_freelist_init = false;
  1986. while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
  1987. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  1988. qdf_mem_unmap_nbytes_single(soc->osdev,
  1989. desc->hw_qdesc_paddr,
  1990. QDF_DMA_BIDIRECTIONAL,
  1991. desc->hw_qdesc_alloc_size);
  1992. qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
  1993. qdf_mem_free(desc);
  1994. }
  1995. qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
  1996. qdf_list_destroy(&soc->reo_desc_deferred_freelist);
  1997. qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
  1998. }
  1999. #else
  2000. static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
  2001. {
  2002. }
  2003. static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
  2004. {
  2005. }
  2006. #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
  2007. /**
  2008. * dp_soc_reset_txrx_ring_map() - reset tx ring map
  2009. * @soc: DP SOC handle
  2010. *
  2011. */
  2012. static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
  2013. {
  2014. uint32_t i;
  2015. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
  2016. soc->tx_ring_map[i] = 0;
  2017. }
  2018. /**
  2019. * dp_soc_deinit() - Deinitialize txrx SOC
  2020. * @txrx_soc: Opaque DP SOC handle
  2021. *
  2022. * Return: None
  2023. */
  2024. void dp_soc_deinit(void *txrx_soc)
  2025. {
  2026. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2027. struct htt_soc *htt_soc = soc->htt_handle;
  2028. dp_monitor_soc_deinit(soc);
  2029. /* free peer tables & AST tables allocated during peer_map_attach */
  2030. if (soc->peer_map_attach_success) {
  2031. dp_peer_find_detach(soc);
  2032. soc->arch_ops.txrx_peer_map_detach(soc);
  2033. soc->peer_map_attach_success = FALSE;
  2034. }
  2035. qdf_flush_work(&soc->htt_stats.work);
  2036. qdf_disable_work(&soc->htt_stats.work);
  2037. qdf_spinlock_destroy(&soc->htt_stats.lock);
  2038. dp_soc_reset_txrx_ring_map(soc);
  2039. dp_reo_desc_freelist_destroy(soc);
  2040. dp_reo_desc_deferred_freelist_destroy(soc);
  2041. DEINIT_RX_HW_STATS_LOCK(soc);
  2042. qdf_spinlock_destroy(&soc->ast_lock);
  2043. dp_peer_mec_spinlock_destroy(soc);
  2044. qdf_nbuf_queue_free(&soc->htt_stats.msg);
  2045. qdf_nbuf_queue_free(&soc->invalid_buf_queue);
  2046. qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
  2047. qdf_spinlock_destroy(&soc->vdev_map_lock);
  2048. dp_reo_cmdlist_destroy(soc);
  2049. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  2050. dp_soc_tx_desc_sw_pools_deinit(soc);
  2051. dp_soc_srng_deinit(soc);
  2052. dp_hw_link_desc_ring_deinit(soc);
  2053. dp_soc_print_inactive_objects(soc);
  2054. qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
  2055. qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
  2056. htt_soc_htc_dealloc(soc->htt_handle);
  2057. htt_soc_detach(htt_soc);
  2058. /* Free wbm sg list and reset flags in down path */
  2059. dp_rx_wbm_sg_list_deinit(soc);
  2060. wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
  2061. WLAN_MD_DP_SOC, "dp_soc");
  2062. }
  2063. #ifdef QCA_HOST2FW_RXBUF_RING
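/**
* dp_htt_setup_rxdma_err_dst_ring() - Register RXDMA error destination ring
* with HTT
* @soc: DP SOC handle
* @mac_id: mac id
* @lmac_id: lmac id used to index soc->rxdma_err_dst_ring
*
* Return: void
*/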
  2064. void
  2065. dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
  2066. int lmac_id)
  2067. {
  2068. if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
  2069. htt_srng_setup(soc->htt_handle, mac_id,
  2070. soc->rxdma_err_dst_ring[lmac_id].hal_srng,
  2071. RXDMA_DST);
  2072. }
  2073. #endif
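/**
* dp_vdev_get_default_reo_hash() - Get the default REO destination ring and
* hash-based steering setting for a vdev
* @vdev: Datapath VDEV handle
* @reo_dest: pointer to default reo_dest ring for vdev to be populated
* @hash_based: pointer to hash value (enabled/disabled) to be populated
*
* Return: void
*/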
  2074. void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
  2075. enum cdp_host_reo_dest_ring *reo_dest,
  2076. bool *hash_based)
  2077. {
  2078. struct dp_soc *soc;
  2079. struct dp_pdev *pdev;
  2080. pdev = vdev->pdev;
  2081. soc = pdev->soc;
  2082. /*
2083. * Hash-based steering is disabled for radios which are offloaded
2084. * to NSS
  2085. */
  2086. if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
  2087. *hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  2088. /*
2089. * The line below ensures the proper reo_dest ring is chosen
2090. * for cases where the Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
  2091. */
  2092. *reo_dest = pdev->reo_dest;
  2093. }
  2094. #ifdef IPA_OFFLOAD
  2095. /**
  2096. * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
  2097. * @vdev: Virtual device
  2098. *
  2099. * Return: true if the vdev is of subtype P2P
  2100. * false if the vdev is of any other subtype
  2101. */
  2102. static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
  2103. {
  2104. if (vdev->subtype == wlan_op_subtype_p2p_device ||
  2105. vdev->subtype == wlan_op_subtype_p2p_cli ||
  2106. vdev->subtype == wlan_op_subtype_p2p_go)
  2107. return true;
  2108. return false;
  2109. }
  2110. /**
  2111. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  2112. * @vdev: Datapath VDEV handle
2113. * @setup_info: peer setup info for MLO
  2114. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  2115. * @hash_based: pointer to hash value (enabled/disabled) to be populated
2116. * @lmac_peer_id_msb: output param for the lmac peer id MSB
  2117. *
2118. * If IPA is enabled in the INI, disable hash-based steering for SAP mode
2119. * and use the default reo_dest ring for RX. Use config values for other modes.
  2120. *
  2121. * Return: None
  2122. */
  2123. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  2124. struct cdp_peer_setup_info *setup_info,
  2125. enum cdp_host_reo_dest_ring *reo_dest,
  2126. bool *hash_based,
  2127. uint8_t *lmac_peer_id_msb)
  2128. {
  2129. struct dp_soc *soc;
  2130. struct dp_pdev *pdev;
  2131. pdev = vdev->pdev;
  2132. soc = pdev->soc;
  2133. dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
  2134. /* For P2P-GO interfaces we do not need to change the REO
  2135. * configuration even if IPA config is enabled
  2136. */
  2137. if (dp_is_vdev_subtype_p2p(vdev))
  2138. return;
  2139. /*
  2140. * If IPA is enabled, disable hash-based flow steering and set
  2141. * reo_dest_ring_4 as the REO ring to receive packets on.
  2142. * IPA is configured to reap reo_dest_ring_4.
  2143. *
  2144. * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
2145. * enum values are from 1 - 4.
  2146. * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
  2147. */
  2148. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  2149. if (dp_ipa_is_mdm_platform()) {
  2150. *reo_dest = IPA_REO_DEST_RING_IDX + 1;
  2151. if (vdev->opmode == wlan_op_mode_ap)
  2152. *hash_based = 0;
  2153. } else {
  2154. dp_debug("opt_dp: default HOST reo ring is set");
  2155. }
  2156. }
  2157. }
  2158. #else
  2159. /**
  2160. * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
  2161. * @vdev: Datapath VDEV handle
2162. * @setup_info: peer setup info for MLO
  2163. * @reo_dest: pointer to default reo_dest ring for vdev to be populated
  2164. * @hash_based: pointer to hash value (enabled/disabled) to be populated
2165. * @lmac_peer_id_msb: output param for the lmac peer id MSB
  2166. *
  2167. * Use system config values for hash based steering.
  2168. * Return: None
  2169. */
  2170. static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
  2171. struct cdp_peer_setup_info *setup_info,
  2172. enum cdp_host_reo_dest_ring *reo_dest,
  2173. bool *hash_based,
  2174. uint8_t *lmac_peer_id_msb)
  2175. {
  2176. struct dp_soc *soc = vdev->pdev->soc;
  2177. soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
  2178. lmac_peer_id_msb);
  2179. }
  2180. #endif /* IPA_OFFLOAD */
  2181. #if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
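/**
* dp_peer_get_local_link_id() - Get (or assign) the local link id for a
* link peer
* @peer: DP peer handle
* @txrx_peer: DP txrx peer holding the local_link_id to mac_addr map
*
* Looks up the peer MAC address in the mapping table and, if no entry is
* found, assigns the first unused entry.
*
* Return: 1-based local link id, or 0 if no free mapping entry is available
*/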
  2182. static inline uint8_t
  2183. dp_peer_get_local_link_id(struct dp_peer *peer, struct dp_txrx_peer *txrx_peer)
  2184. {
  2185. struct dp_local_link_id_peer_map *ll_id_peer_map =
  2186. &txrx_peer->ll_id_peer_map[0];
  2187. int i;
  2188. /*
  2189. * Search for the peer entry in the
  2190. * local_link_id to peer mac_addr mapping table
  2191. */
  2192. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  2193. if (ll_id_peer_map[i].in_use &&
  2194. !qdf_mem_cmp(&peer->mac_addr.raw[0],
  2195. &ll_id_peer_map[i].mac_addr.raw[0],
  2196. QDF_MAC_ADDR_SIZE))
  2197. return ll_id_peer_map[i].local_link_id + 1;
  2198. }
  2199. /*
  2200. * Create new entry for peer in the
  2201. * local_link_id to peer mac_addr mapping table
  2202. */
  2203. for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
  2204. if (ll_id_peer_map[i].in_use)
  2205. continue;
  2206. ll_id_peer_map[i].in_use = 1;
  2207. ll_id_peer_map[i].local_link_id = i;
  2208. qdf_mem_copy(&ll_id_peer_map[i].mac_addr.raw[0],
  2209. &peer->mac_addr.raw[0], QDF_MAC_ADDR_SIZE);
  2210. return ll_id_peer_map[i].local_link_id + 1;
  2211. }
2212. /* We should not hit this case; all DP_MAX_MLO_LINKS map entries are in use. Assert? */
  2213. return 0;
  2214. }
  2215. /**
  2216. * dp_peer_set_local_link_id() - Set local link id
  2217. * @peer: dp peer handle
  2218. *
  2219. * Return: None
  2220. */
  2221. static inline void
  2222. dp_peer_set_local_link_id(struct dp_peer *peer)
  2223. {
  2224. struct dp_txrx_peer *txrx_peer;
  2225. if (!IS_MLO_DP_LINK_PEER(peer))
  2226. return;
  2227. txrx_peer = dp_get_txrx_peer(peer);
  2228. if (txrx_peer)
  2229. peer->local_link_id = dp_peer_get_local_link_id(peer,
  2230. txrx_peer);
  2231. dp_info("Peer " QDF_MAC_ADDR_FMT " txrx_peer %pK local_link_id %d",
  2232. QDF_MAC_ADDR_REF(peer->mac_addr.raw), txrx_peer,
  2233. peer->local_link_id);
  2234. }
  2235. #else
  2236. static inline void
  2237. dp_peer_set_local_link_id(struct dp_peer *peer)
  2238. {
  2239. }
  2240. #endif
  2241. /**
  2242. * dp_peer_setup_wifi3() - initialize the peer
  2243. * @soc_hdl: soc handle object
  2244. * @vdev_id: vdev_id of vdev object
  2245. * @peer_mac: Peer's mac address
  2246. * @setup_info: peer setup info for MLO
  2247. *
  2248. * Return: QDF_STATUS
  2249. */
  2250. QDF_STATUS
  2251. dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2252. uint8_t *peer_mac,
  2253. struct cdp_peer_setup_info *setup_info)
  2254. {
  2255. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  2256. struct dp_pdev *pdev;
  2257. bool hash_based = 0;
  2258. enum cdp_host_reo_dest_ring reo_dest;
  2259. QDF_STATUS status = QDF_STATUS_SUCCESS;
  2260. struct dp_vdev *vdev = NULL;
  2261. struct dp_peer *peer =
  2262. dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
  2263. DP_MOD_ID_CDP);
  2264. struct dp_peer *mld_peer = NULL;
  2265. enum wlan_op_mode vdev_opmode;
  2266. uint8_t lmac_peer_id_msb = 0;
  2267. if (!peer)
  2268. return QDF_STATUS_E_FAILURE;
  2269. vdev = peer->vdev;
  2270. if (!vdev) {
  2271. status = QDF_STATUS_E_FAILURE;
  2272. goto fail;
  2273. }
  2274. /* save vdev related member in case vdev freed */
  2275. vdev_opmode = vdev->opmode;
  2276. pdev = vdev->pdev;
  2277. dp_peer_setup_get_reo_hash(vdev, setup_info,
  2278. &reo_dest, &hash_based,
  2279. &lmac_peer_id_msb);
  2280. dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
  2281. peer, vdev, vdev->vdev_id,
  2282. setup_info);
  2283. dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
  2284. "hash-based-steering:%d default-reo_dest:%u",
  2285. pdev->pdev_id, vdev->vdev_id,
  2286. vdev->opmode, peer,
  2287. QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
  2288. /*
2289. * There are corner cases where AD1 = AD2 = the VAP's address,
2290. * i.e. both devices have the same MAC address. In these
2291. * cases we want such packets to be processed in the NULL queue
2292. * handler, which is the REO2TCL ring. For this reason we should
2293. * not set up reo_queues and the default route for the bss_peer.
  2294. */
  2295. if (!IS_MLO_DP_MLD_PEER(peer))
  2296. dp_monitor_peer_tx_init(pdev, peer);
  2297. if (!setup_info)
  2298. if (dp_peer_legacy_setup(soc, peer) !=
  2299. QDF_STATUS_SUCCESS) {
  2300. status = QDF_STATUS_E_RESOURCES;
  2301. goto fail;
  2302. }
  2303. if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
  2304. status = QDF_STATUS_E_FAILURE;
  2305. goto fail;
  2306. }
  2307. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  2308. /* TODO: Check the destination ring number to be passed to FW */
  2309. soc->cdp_soc.ol_ops->peer_set_default_routing(
  2310. soc->ctrl_psoc,
  2311. peer->vdev->pdev->pdev_id,
  2312. peer->mac_addr.raw,
  2313. peer->vdev->vdev_id, hash_based, reo_dest,
  2314. lmac_peer_id_msb);
  2315. }
  2316. qdf_atomic_set(&peer->is_default_route_set, 1);
  2317. status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
  2318. if (QDF_IS_STATUS_ERROR(status)) {
  2319. dp_peer_err("peer mlo setup failed");
  2320. qdf_assert_always(0);
  2321. }
  2322. if (vdev_opmode != wlan_op_mode_monitor) {
  2323. /* In case of MLD peer, switch peer to mld peer and
  2324. * do peer_rx_init.
  2325. */
  2326. if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
  2327. IS_MLO_DP_LINK_PEER(peer)) {
  2328. if (setup_info && setup_info->is_first_link) {
  2329. mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
  2330. if (mld_peer)
  2331. dp_peer_rx_init(pdev, mld_peer);
  2332. else
  2333. dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
  2334. }
  2335. } else {
  2336. dp_peer_rx_init_wrapper(pdev, peer, setup_info);
  2337. }
  2338. }
  2339. dp_peer_set_local_link_id(peer);
  2340. if (!IS_MLO_DP_MLD_PEER(peer))
  2341. dp_peer_ppdu_delayed_ba_init(peer);
  2342. fail:
  2343. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  2344. return status;
  2345. }
  2346. /**
  2347. * dp_set_ba_aging_timeout() - set ba aging timeout per AC
  2348. * @txrx_soc: cdp soc handle
  2349. * @ac: Access category
  2350. * @value: timeout value in millisec
  2351. *
  2352. * Return: void
  2353. */
  2354. void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  2355. uint8_t ac, uint32_t value)
  2356. {
  2357. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2358. hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
  2359. }
  2360. /**
  2361. * dp_get_ba_aging_timeout() - get ba aging timeout per AC
  2362. * @txrx_soc: cdp soc handle
  2363. * @ac: access category
  2364. * @value: timeout value in millisec
  2365. *
  2366. * Return: void
  2367. */
  2368. void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
  2369. uint8_t ac, uint32_t *value)
  2370. {
  2371. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  2372. hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
  2373. }
  2374. /**
  2375. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  2376. * @txrx_soc: cdp soc handle
  2377. * @pdev_id: id of physical device object
  2378. * @val: reo destination ring index (1 - 4)
  2379. *
  2380. * Return: QDF_STATUS
  2381. */
  2382. QDF_STATUS
  2383. dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
  2384. enum cdp_host_reo_dest_ring val)
  2385. {
  2386. struct dp_pdev *pdev =
  2387. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
  2388. pdev_id);
  2389. if (pdev) {
  2390. pdev->reo_dest = val;
  2391. return QDF_STATUS_SUCCESS;
  2392. }
  2393. return QDF_STATUS_E_FAILURE;
  2394. }
  2395. /**
  2396. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  2397. * @txrx_soc: cdp soc handle
  2398. * @pdev_id: id of physical device object
  2399. *
  2400. * Return: reo destination ring index
  2401. */
  2402. enum cdp_host_reo_dest_ring
  2403. dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
  2404. {
  2405. struct dp_pdev *pdev =
  2406. dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
  2407. pdev_id);
  2408. if (pdev)
  2409. return pdev->reo_dest;
  2410. else
  2411. return cdp_host_reo_dest_ring_unknown;
  2412. }
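/**
* dp_rx_bar_stats_cb() - REO queue status callback for BAR Rx statistics
* @soc: DP SOC handle
* @cb_ctxt: callback context (DP PDEV handle)
* @reo_status: REO command status
*
* On success, accumulates the BAR received count into the pdev Rx stats and
* marks the stats command as complete.
*
* Return: void
*/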
  2413. void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  2414. union hal_reo_status *reo_status)
  2415. {
  2416. struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
  2417. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  2418. if (!dp_check_pdev_exists(soc, pdev)) {
  2419. dp_err_rl("pdev doesn't exist");
  2420. return;
  2421. }
  2422. if (!qdf_atomic_read(&soc->cmn_init_done))
  2423. return;
  2424. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  2425. DP_PRINT_STATS("REO stats failure %d",
  2426. queue_status->header.status);
  2427. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  2428. return;
  2429. }
  2430. pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
  2431. qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
  2432. }
  2433. /**
  2434. * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
  2435. * @soc: dp soc.
  2436. * @pdev: dp pdev.
  2437. *
  2438. * Return: None.
  2439. */
  2440. void
  2441. dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
  2442. {
  2443. uint32_t hw_head;
  2444. uint32_t hw_tail;
  2445. struct dp_srng *srng;
  2446. if (!soc) {
  2447. dp_err("soc is NULL");
  2448. return;
  2449. }
  2450. if (!pdev) {
  2451. dp_err("pdev is NULL");
  2452. return;
  2453. }
  2454. srng = &pdev->soc->wbm_idle_link_ring;
  2455. if (!srng) {
  2456. dp_err("wbm_idle_link_ring srng is NULL");
  2457. return;
  2458. }
  2459. hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
  2460. &hw_tail, WBM_IDLE_LINK);
  2461. dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
  2462. hw_head, hw_tail);
  2463. }
  2464. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  2465. static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
  2466. uint32_t rx_limit)
  2467. {
  2468. soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
  2469. soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
  2470. }
  2471. #else
  2472. static inline
  2473. void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
  2474. uint32_t rx_limit)
  2475. {
  2476. }
  2477. #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
  2478. /**
  2479. * dp_display_srng_info() - Dump the srng HP TP info
  2480. * @soc_hdl: CDP Soc handle
  2481. *
  2482. * This function dumps the SW hp/tp values for the important rings.
  2483. * HW hp/tp values are not being dumped, since it can lead to
  2484. * READ NOC error when UMAC is in low power state. MCC does not have
  2485. * device force wake working yet.
  2486. *
  2487. * Return: rings are empty
  2488. */
  2489. bool dp_display_srng_info(struct cdp_soc_t *soc_hdl)
  2490. {
  2491. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2492. hal_soc_handle_t hal_soc = soc->hal_soc;
  2493. uint32_t hp, tp, i;
  2494. bool ret = true;
  2495. dp_info("SRNG HP-TP data:");
  2496. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2497. hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
  2498. &tp, &hp);
  2499. dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
  2500. if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
  2501. INVALID_WBM_RING_NUM)
  2502. continue;
  2503. hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
  2504. &tp, &hp);
  2505. dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
  2506. }
  2507. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  2508. hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
  2509. &tp, &hp);
  2510. if (hp != tp)
  2511. ret = false;
  2512. dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
  2513. }
  2514. hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
  2515. dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
  2516. hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
  2517. dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
  2518. hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
  2519. dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
  2520. return ret;
  2521. }
  2522. /**
  2523. * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
  2524. * @psoc: dp soc handle
  2525. * @pdev_id: id of DP_PDEV handle
  2526. * @pcp: pcp value
  2527. * @tid: tid value passed by the user
  2528. *
  2529. * Return: QDF_STATUS_SUCCESS on success
  2530. */
  2531. QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
  2532. uint8_t pdev_id,
  2533. uint8_t pcp, uint8_t tid)
  2534. {
  2535. struct dp_soc *soc = (struct dp_soc *)psoc;
  2536. soc->pcp_tid_map[pcp] = tid;
  2537. hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
  2538. return QDF_STATUS_SUCCESS;
  2539. }
  2540. /**
  2541. * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
  2542. * @soc_hdl: DP soc handle
  2543. * @vdev_id: id of DP_VDEV handle
  2544. * @pcp: pcp value
  2545. * @tid: tid value passed by the user
  2546. *
  2547. * Return: QDF_STATUS_SUCCESS on success
  2548. */
  2549. QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
  2550. uint8_t vdev_id,
  2551. uint8_t pcp, uint8_t tid)
  2552. {
  2553. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2554. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2555. DP_MOD_ID_CDP);
  2556. if (!vdev)
  2557. return QDF_STATUS_E_FAILURE;
  2558. vdev->pcp_tid_map[pcp] = tid;
  2559. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  2560. return QDF_STATUS_SUCCESS;
  2561. }
  2562. #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
  2563. QDF_STATUS dp_drain_txrx(struct cdp_soc_t *soc_handle, uint8_t rx_only)
  2564. {
  2565. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  2566. uint32_t cur_tx_limit, cur_rx_limit;
  2567. uint32_t budget = 0xffff;
  2568. uint32_t val;
  2569. int i;
  2570. int cpu = dp_srng_get_cpu();
  2571. QDF_STATUS status = QDF_STATUS_SUCCESS;
  2572. cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
  2573. cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
  2574. /* Temporarily increase soft irq limits when going to drain
  2575. * the UMAC/LMAC SRNGs and restore them after polling.
2576. * Though the budget is on the higher side, the TX/RX reaping loops
2577. * will not run for long, as both TX and RX would already be suspended
2578. * by the time this API is called.
  2579. */
  2580. dp_update_soft_irq_limits(soc, budget, budget);
  2581. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  2582. if (rx_only && !soc->intr_ctx[i].rx_ring_mask)
  2583. continue;
  2584. soc->arch_ops.dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
  2585. }
  2586. dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
  2587. status = hif_try_complete_dp_tasks(soc->hif_handle);
  2588. if (QDF_IS_STATUS_ERROR(status)) {
  2589. dp_err("Failed to complete DP tasks");
  2590. return status;
  2591. }
  2592. /* Do a dummy read at offset 0; this will ensure all
2593. * pending writes (HP/TP) are flushed before the read returns.
  2594. */
  2595. val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
  2596. dp_debug("Register value at offset 0: %u", val);
  2597. return status;
  2598. }
  2599. #endif
  2600. #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
  2601. /**
  2602. * dp_flush_ring_hptp() - Update ring shadow
  2603. * register HP/TP address when runtime
  2604. * resume
  2605. * @soc: DP soc context
  2606. * @hal_srng: srng
  2607. *
  2608. * Return: None
  2609. */
  2610. static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
  2611. {
  2612. if (hal_srng && hal_srng_get_clear_event(hal_srng,
  2613. HAL_SRNG_FLUSH_EVENT)) {
  2614. /* Acquire the lock */
  2615. hal_srng_access_start(soc->hal_soc, hal_srng);
  2616. hal_srng_access_end(soc->hal_soc, hal_srng);
  2617. hal_srng_set_flush_last_ts(hal_srng);
  2618. dp_debug("flushed");
  2619. }
  2620. }
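/**
* dp_update_ring_hptp() - Flush shadow HP/TP for TCL data and REO cmd rings
* @soc: DP SOC context
* @force_flush_tx: when true, raise a flush event and flush all TCL data
* rings, then return
*
* When @force_flush_tx is not set, TCL data rings with a pending flush event
* and the REO command ring are flushed.
*
* Return: void
*/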
  2621. void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
  2622. {
  2623. uint8_t i;
  2624. if (force_flush_tx) {
  2625. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  2626. hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
  2627. HAL_SRNG_FLUSH_EVENT);
  2628. dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
  2629. }
  2630. return;
  2631. }
  2632. for (i = 0; i < soc->num_tcl_data_rings; i++)
  2633. dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
  2634. dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
  2635. }
  2636. #endif
  2637. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
2638. /**
  2639. * dp_flush_tcl_ring() - flush TCL ring hp
  2640. * @pdev: dp pdev
  2641. * @ring_id: TCL ring id
  2642. *
  2643. * Return: 0 on success and error code on failure
  2644. */
  2645. int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
  2646. {
  2647. struct dp_soc *soc = pdev->soc;
  2648. hal_ring_handle_t hal_ring_hdl =
  2649. soc->tcl_data_ring[ring_id].hal_srng;
  2650. int ret;
  2651. ret = hal_srng_try_access_start(soc->hal_soc, hal_ring_hdl);
  2652. if (ret)
  2653. return ret;
  2654. ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
  2655. if (ret) {
  2656. hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
  2657. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  2658. hal_srng_inc_flush_cnt(hal_ring_hdl);
  2659. return ret;
  2660. }
  2661. hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
  2662. hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
  2663. return ret;
  2664. }
  2665. #else
  2666. int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
  2667. {
  2668. return QDF_STATUS_SUCCESS;
  2669. }
  2670. #endif
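/*
 * Illustrative usage sketch (compiled out, not part of the driver):
 * how a SW latency manager caller might kick a TCL ring HP flush. The
 * wrapper name is hypothetical; on failure dp_flush_tcl_ring() has
 * already recorded a flush event, so the next ring access flushes.
 */
#if 0
static void dp_swlm_flush_sketch(struct dp_pdev *pdev, int ring_id)
{
	if (dp_flush_tcl_ring(pdev, ring_id))
		dp_debug("TCL ring %d flush deferred to next access", ring_id);
}
#endif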
  2671. #ifdef WLAN_FEATURE_STATS_EXT
  2672. /* rx hw stats event wait timeout in ms */
  2673. #define DP_REO_STATUS_STATS_TIMEOUT 100
  2674. /**
  2675. * dp_rx_hw_stats_cb() - request rx hw stats response callback
  2676. * @soc: soc handle
  2677. * @cb_ctxt: callback context
  2678. * @reo_status: reo command response status
  2679. *
  2680. * Return: None
  2681. */
  2682. static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  2683. union hal_reo_status *reo_status)
  2684. {
  2685. struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
  2686. bool is_query_timeout;
  2687. qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
  2688. is_query_timeout = soc->rx_hw_stats->is_query_timeout;
  2689. /* free the cb_ctxt once all pending TID stats queries are received */
  2690. if (qdf_atomic_dec_and_test(&soc->rx_hw_stats->pending_tid_stats_cnt)) {
  2691. if (!is_query_timeout) {
  2692. qdf_event_set(&soc->rx_hw_stats_event);
  2693. soc->is_last_stats_ctx_init = false;
  2694. }
  2695. qdf_mem_free(soc->rx_hw_stats);
  2696. soc->rx_hw_stats = NULL;
  2697. }
  2698. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  2699. dp_info("REO stats failure %d",
  2700. queue_status->header.status);
  2701. qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
  2702. return;
  2703. }
  2704. if (!is_query_timeout) {
  2705. soc->ext_stats.rx_mpdu_received +=
  2706. queue_status->mpdu_frms_cnt;
  2707. soc->ext_stats.rx_mpdu_missed +=
  2708. queue_status->hole_cnt;
  2709. }
  2710. qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
  2711. }
  2712. /**
  2713. * dp_request_rx_hw_stats() - request rx hardware stats
  2714. * @soc_hdl: soc handle
  2715. * @vdev_id: vdev id
  2716. *
  2717. * Return: QDF_STATUS
  2718. */
  2719. QDF_STATUS
  2720. dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
  2721. {
  2722. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  2723. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2724. DP_MOD_ID_CDP);
  2725. struct dp_peer *peer = NULL;
  2726. QDF_STATUS status;
  2727. int rx_stats_sent_cnt = 0;
  2728. uint32_t last_rx_mpdu_received;
  2729. uint32_t last_rx_mpdu_missed;
  2730. if (soc->rx_hw_stats) {
  2731. dp_err_rl("Stats already requested");
  2732. status = QDF_STATUS_E_ALREADY;
  2733. goto out;
  2734. }
  2735. if (!vdev) {
  2736. dp_err("vdev is null for vdev_id: %u", vdev_id);
  2737. status = QDF_STATUS_E_INVAL;
  2738. goto out;
  2739. }
  2740. peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
  2741. if (!peer) {
  2742. dp_err("Peer is NULL");
  2743. status = QDF_STATUS_E_INVAL;
  2744. goto out;
  2745. }
  2746. soc->rx_hw_stats = qdf_mem_malloc(sizeof(*soc->rx_hw_stats));
  2747. if (!soc->rx_hw_stats) {
  2748. dp_err("malloc failed for hw stats structure");
  2749. status = QDF_STATUS_E_INVAL;
  2750. goto out;
  2751. }
  2752. qdf_event_reset(&soc->rx_hw_stats_event);
  2753. qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
  2754. /* save the last soc cumulative stats and reset them to 0 */
  2755. last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
  2756. last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
  2757. soc->ext_stats.rx_mpdu_received = 0;
  2758. soc->ext_stats.rx_mpdu_missed = 0;
  2759. dp_debug("HW stats query start");
  2760. rx_stats_sent_cnt =
  2761. dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, soc->rx_hw_stats);
  2762. if (!rx_stats_sent_cnt) {
  2763. dp_err("no tid stats sent successfully");
  2764. qdf_mem_free(soc->rx_hw_stats);
  2765. soc->rx_hw_stats = NULL;
  2766. qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
  2767. status = QDF_STATUS_E_INVAL;
  2768. goto out;
  2769. }
  2770. qdf_atomic_set(&soc->rx_hw_stats->pending_tid_stats_cnt,
  2771. rx_stats_sent_cnt);
  2772. soc->rx_hw_stats->is_query_timeout = false;
  2773. soc->is_last_stats_ctx_init = true;
  2774. qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
  2775. status = qdf_wait_single_event(&soc->rx_hw_stats_event,
  2776. DP_REO_STATUS_STATS_TIMEOUT);
  2777. dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
  2778. qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
  2779. if (status != QDF_STATUS_SUCCESS) {
  2780. if (soc->rx_hw_stats) {
  2781. dp_info("partial rx hw stats event collected with %d",
  2782. qdf_atomic_read(
  2783. &soc->rx_hw_stats->pending_tid_stats_cnt));
  2784. if (soc->is_last_stats_ctx_init)
  2785. soc->rx_hw_stats->is_query_timeout = true;
  2786. }
  2787. /*
  2788. * If the query timed out, fall back to the last saved
  2789. * stats for this query.
  2790. */
  2791. soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
  2792. soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
  2793. DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
  2794. }
  2795. qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
  2796. out:
  2797. if (peer)
  2798. dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
  2799. if (vdev)
  2800. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  2801. DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
  2802. return status;
  2803. }
  2804. /**
  2805. * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
  2806. * @soc_hdl: soc handle
  2807. *
  2808. * Return: None
  2809. */
  2810. void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
  2811. {
  2812. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  2813. soc->ext_stats.rx_mpdu_received = 0;
  2814. soc->ext_stats.rx_mpdu_missed = 0;
  2815. }
  2816. #endif /* WLAN_FEATURE_STATS_EXT */
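/*
 * Illustrative usage sketch (compiled out, not part of the driver):
 * a control-path caller requesting RX HW stats and reading back the
 * aggregated counters. The caller name is hypothetical; the API and
 * ext_stats fields are the ones defined above.
 */
#if 0
static void dp_rx_hw_stats_query_sketch(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	if (QDF_IS_STATUS_SUCCESS(dp_request_rx_hw_stats(soc_hdl, vdev_id)))
		dp_info("rx_mpdu_received %u rx_mpdu_missed %u",
			soc->ext_stats.rx_mpdu_received,
			soc->ext_stats.rx_mpdu_missed);
}
#endif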
  2817. uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
  2818. {
  2819. struct dp_soc *soc = (struct dp_soc *)soc_hdl;
  2820. return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
  2821. }
  2822. void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
  2823. {
  2824. uint32_t i;
  2825. for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
  2826. soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
  2827. }
  2828. }
  2829. qdf_export_symbol(dp_soc_set_txrx_ring_map);
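/*
 * Illustrative sketch (compiled out, not part of the driver): dumping
 * the per-interrupt-context TX ring map that dp_soc_set_txrx_ring_map()
 * fills from dp_cpu_ring_map[DP_NSS_DEFAULT_MAP]. The helper name is
 * hypothetical.
 */
#if 0
static void dp_dump_txrx_ring_map_sketch(struct dp_soc *soc)
{
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		dp_info("intr ctx %u -> tx_ring_map 0x%x",
			i, soc->tx_ring_map[i]);
}
#endif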
  2830. static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
  2831. {
  2832. dp_init_info("DP soc Dump for Target = %d", target_type);
  2833. dp_init_info("ast_override_support = %d da_war_enabled = %d",
  2834. soc->ast_override_support, soc->da_war_enabled);
  2835. wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
  2836. }
  2837. /**
  2838. * dp_soc_cfg_init() - initialize target specific configuration
  2839. * during dp_soc_init
  2840. * @soc: dp soc handle
  2841. */
  2842. static void dp_soc_cfg_init(struct dp_soc *soc)
  2843. {
  2844. uint32_t target_type;
  2845. target_type = hal_get_target_type(soc->hal_soc);
  2846. switch (target_type) {
  2847. case TARGET_TYPE_QCA6290:
  2848. wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
  2849. REO_DST_RING_SIZE_QCA6290);
  2850. soc->ast_override_support = 1;
  2851. soc->da_war_enabled = false;
  2852. break;
  2853. case TARGET_TYPE_QCA6390:
  2854. case TARGET_TYPE_QCA6490:
  2855. case TARGET_TYPE_QCA6750:
  2856. wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
  2857. REO_DST_RING_SIZE_QCA6290);
  2858. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
  2859. soc->ast_override_support = 1;
  2860. if (soc->cdp_soc.ol_ops->get_con_mode &&
  2861. soc->cdp_soc.ol_ops->get_con_mode() ==
  2862. QDF_GLOBAL_MONITOR_MODE) {
  2863. int int_ctx;
  2864. for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
  2865. soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
  2866. soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
  2867. }
  2868. }
  2869. soc->wlan_cfg_ctx->rxdma1_enable = 0;
  2870. break;
  2871. case TARGET_TYPE_KIWI:
  2872. case TARGET_TYPE_MANGO:
  2873. case TARGET_TYPE_PEACH:
  2874. soc->ast_override_support = 1;
  2875. soc->per_tid_basize_max_tid = 8;
  2876. if (soc->cdp_soc.ol_ops->get_con_mode &&
  2877. soc->cdp_soc.ol_ops->get_con_mode() ==
  2878. QDF_GLOBAL_MONITOR_MODE) {
  2879. int int_ctx;
  2880. for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
  2881. int_ctx++) {
  2882. soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
  2883. if (dp_is_monitor_mode_using_poll(soc))
  2884. soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
  2885. }
  2886. }
  2887. soc->wlan_cfg_ctx->rxdma1_enable = 0;
  2888. soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
  2889. break;
  2890. case TARGET_TYPE_QCA8074:
  2891. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
  2892. soc->da_war_enabled = true;
  2893. soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
  2894. break;
  2895. case TARGET_TYPE_QCA8074V2:
  2896. case TARGET_TYPE_QCA6018:
  2897. case TARGET_TYPE_QCA9574:
  2898. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
  2899. soc->ast_override_support = 1;
  2900. soc->per_tid_basize_max_tid = 8;
  2901. soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
  2902. soc->da_war_enabled = false;
  2903. soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
  2904. break;
  2905. case TARGET_TYPE_QCN9000:
  2906. soc->ast_override_support = 1;
  2907. soc->da_war_enabled = false;
  2908. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
  2909. soc->per_tid_basize_max_tid = 8;
  2910. soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
  2911. soc->lmac_polled_mode = 0;
  2912. soc->wbm_release_desc_rx_sg_support = 1;
  2913. soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
  2914. break;
  2915. case TARGET_TYPE_QCA5018:
  2916. case TARGET_TYPE_QCN6122:
  2917. case TARGET_TYPE_QCN9160:
  2918. soc->ast_override_support = 1;
  2919. soc->da_war_enabled = false;
  2920. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
  2921. soc->per_tid_basize_max_tid = 8;
  2922. soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
  2923. soc->disable_mac1_intr = 1;
  2924. soc->disable_mac2_intr = 1;
  2925. soc->wbm_release_desc_rx_sg_support = 1;
  2926. break;
  2927. case TARGET_TYPE_QCN9224:
  2928. soc->umac_reset_supported = true;
  2929. soc->ast_override_support = 1;
  2930. soc->da_war_enabled = false;
  2931. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
  2932. soc->per_tid_basize_max_tid = 8;
  2933. soc->wbm_release_desc_rx_sg_support = 1;
  2934. soc->rxdma2sw_rings_not_supported = 1;
  2935. soc->wbm_sg_last_msdu_war = 1;
  2936. soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
  2937. soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
  2938. soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
  2939. wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
  2940. soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
  2941. CFG_DP_HOST_AST_DB_ENABLE);
  2942. soc->features.wds_ext_ast_override_enable = true;
  2943. break;
  2944. case TARGET_TYPE_QCA5332:
  2945. case TARGET_TYPE_QCN6432:
  2946. soc->umac_reset_supported = true;
  2947. soc->ast_override_support = 1;
  2948. soc->da_war_enabled = false;
  2949. wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
  2950. soc->per_tid_basize_max_tid = 8;
  2951. soc->wbm_release_desc_rx_sg_support = 1;
  2952. soc->rxdma2sw_rings_not_supported = 1;
  2953. soc->wbm_sg_last_msdu_war = 1;
  2954. soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
  2955. soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
  2956. soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
  2957. wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
  2958. soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
  2959. CFG_DP_HOST_AST_DB_ENABLE);
  2960. soc->features.wds_ext_ast_override_enable = true;
  2961. break;
  2962. default:
  2963. qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
  2964. qdf_assert_always(0);
  2965. break;
  2966. }
  2967. dp_soc_cfg_dump(soc, target_type);
  2968. }
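/*
 * Illustrative switch-case fragment (compiled out, not part of the
 * driver): the per-target pattern used in dp_soc_cfg_init() above,
 * shown for a hypothetical TARGET_TYPE_NEW. The values chosen are
 * placeholders, not a recommendation for any real target.
 */
#if 0
	case TARGET_TYPE_NEW:
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		break;
#endif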
  2969. /**
  2970. * dp_soc_get_ap_mld_mode() - store ap mld mode from ini
  2971. * @soc: Opaque DP SOC handle
  2972. *
  2973. * Return: none
  2974. */
  2975. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  2976. static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
  2977. {
  2978. if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
  2979. soc->mld_mode_ap =
  2980. soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
  2981. CDP_CFG_MLD_NETDEV_MODE_AP);
  2982. }
  2983. dp_info("DP mld_mode_ap-%u\n", soc->mld_mode_ap);
  2984. }
  2985. #else
  2986. static inline void dp_soc_get_ap_mld_mode(struct dp_soc *soc)
  2987. {
  2988. (void)soc;
  2989. }
  2990. #endif
  2991. #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
  2992. /**
  2993. * dp_soc_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_soc
  2994. * @soc: Datapath soc handle
  2995. *
  2996. * Return: none
  2997. */
  2998. static inline
  2999. void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
  3000. {
  3001. soc->hw_txrx_stats_en =
  3002. wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
  3003. }
  3004. #else
  3005. static inline
  3006. void dp_soc_hw_txrx_stats_init(struct dp_soc *soc)
  3007. {
  3008. soc->hw_txrx_stats_en = 0;
  3009. }
  3010. #endif
  3011. /**
  3012. * dp_soc_init() - Initialize txrx SOC
  3013. * @soc: Opaque DP SOC handle
  3014. * @htc_handle: Opaque HTC handle
  3015. * @hif_handle: Opaque HIF handle
  3016. *
  3017. * Return: DP SOC handle on success, NULL on failure
  3018. */
  3019. void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
  3020. struct hif_opaque_softc *hif_handle)
  3021. {
  3022. struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
  3023. bool is_monitor_mode = false;
  3024. uint8_t i;
  3025. int num_dp_msi;
  3026. bool ppeds_attached = false;
  3027. htt_soc = htt_soc_attach(soc, htc_handle);
  3028. if (!htt_soc)
  3029. goto fail1;
  3030. soc->htt_handle = htt_soc;
  3031. if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
  3032. goto fail2;
  3033. htt_set_htc_handle(htt_soc, htc_handle);
  3034. dp_soc_cfg_init(soc);
  3035. dp_monitor_soc_cfg_init(soc);
  3036. /* Reset/Initialize wbm sg list and flags */
  3037. dp_rx_wbm_sg_list_reset(soc);
  3038. /* Note: Any SRNG ring initialization should happen only after
  3039. * the interrupt mode is set and the interrupt masks are
  3040. * filled. IT SHOULD ALWAYS BE IN THIS ORDER.
  3041. */
  3042. dp_soc_set_interrupt_mode(soc);
  3043. if (soc->cdp_soc.ol_ops->get_con_mode &&
  3044. soc->cdp_soc.ol_ops->get_con_mode() ==
  3045. QDF_GLOBAL_MONITOR_MODE) {
  3046. is_monitor_mode = true;
  3047. soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
  3048. } else {
  3049. soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
  3050. }
  3051. num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
  3052. if (num_dp_msi < 0) {
  3053. dp_init_err("%pK: dp_interrupt assignment failed", soc);
  3054. goto fail3;
  3055. }
  3056. if (soc->arch_ops.ppeds_handle_attached)
  3057. ppeds_attached = soc->arch_ops.ppeds_handle_attached(soc);
  3058. wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
  3059. soc->intr_mode, is_monitor_mode,
  3060. ppeds_attached,
  3061. soc->umac_reset_supported);
  3062. /* initialize WBM_IDLE_LINK ring */
  3063. if (dp_hw_link_desc_ring_init(soc)) {
  3064. dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
  3065. goto fail3;
  3066. }
  3067. dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
  3068. if (dp_soc_srng_init(soc)) {
  3069. dp_init_err("%pK: dp_soc_srng_init failed", soc);
  3070. goto fail4;
  3071. }
  3072. if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
  3073. htt_get_htc_handle(htt_soc),
  3074. soc->hal_soc, soc->osdev) == NULL)
  3075. goto fail5;
  3076. /* Initialize descriptors in TCL Rings */
  3077. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3078. hal_tx_init_data_ring(soc->hal_soc,
  3079. soc->tcl_data_ring[i].hal_srng);
  3080. }
  3081. if (dp_soc_tx_desc_sw_pools_init(soc)) {
  3082. dp_init_err("%pK: dp_tx_soc_attach failed", soc);
  3083. goto fail6;
  3084. }
  3085. if (soc->arch_ops.txrx_soc_ppeds_start) {
  3086. if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
  3087. dp_init_err("%pK: ppeds start failed", soc);
  3088. goto fail7;
  3089. }
  3090. }
  3091. wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
  3092. cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
  3093. #ifdef WLAN_SUPPORT_RX_FLOW_TAG
  3094. wlan_cfg_set_rx_rr(soc->wlan_cfg_ctx,
  3095. cfg_get(soc->ctrl_psoc, CFG_DP_RX_RR));
  3096. #endif
  3097. soc->cce_disable = false;
  3098. soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
  3099. soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
  3100. qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
  3101. qdf_spinlock_create(&soc->vdev_map_lock);
  3102. qdf_atomic_init(&soc->num_tx_outstanding);
  3103. qdf_atomic_init(&soc->num_tx_exception);
  3104. soc->num_tx_allowed =
  3105. wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
  3106. soc->num_tx_spl_allowed =
  3107. wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
  3108. soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
  3109. if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
  3110. int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
  3111. CDP_CFG_MAX_PEER_ID);
  3112. if (ret != -EINVAL)
  3113. wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
  3114. ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
  3115. CDP_CFG_CCE_DISABLE);
  3116. if (ret == 1)
  3117. soc->cce_disable = true;
  3118. }
  3119. /*
  3120. * Skip registering hw ring interrupts for WMAC2 on IPQ6018
  3121. * and IPQ5018, as WMAC2 is not present on these platforms.
  3122. */
  3123. if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
  3124. soc->disable_mac2_intr)
  3125. dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
  3126. /*
  3127. * Skip registering hw ring interrupts for WMAC1 on IPQ5018,
  3128. * as WMAC1 is not present on this platform.
  3129. */
  3130. if (soc->disable_mac1_intr)
  3131. dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
  3132. /* setup the global rx defrag waitlist */
  3133. TAILQ_INIT(&soc->rx.defrag.waitlist);
  3134. soc->rx.defrag.timeout_ms =
  3135. wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
  3136. soc->rx.defrag.next_flush_ms = 0;
  3137. soc->rx.flags.defrag_timeout_check =
  3138. wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
  3139. qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
  3140. dp_monitor_soc_init(soc);
  3141. qdf_atomic_set(&soc->cmn_init_done, 1);
  3142. qdf_nbuf_queue_init(&soc->htt_stats.msg);
  3143. qdf_spinlock_create(&soc->ast_lock);
  3144. dp_peer_mec_spinlock_create(soc);
  3145. qdf_spinlock_create(&soc->reo_desc_freelist_lock);
  3146. qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
  3147. INIT_RX_HW_STATS_LOCK(soc);
  3148. qdf_nbuf_queue_init(&soc->invalid_buf_queue);
  3149. /* fill the tx/rx cpu ring map */
  3150. dp_soc_set_txrx_ring_map(soc);
  3151. TAILQ_INIT(&soc->inactive_peer_list);
  3152. qdf_spinlock_create(&soc->inactive_peer_list_lock);
  3153. TAILQ_INIT(&soc->inactive_vdev_list);
  3154. qdf_spinlock_create(&soc->inactive_vdev_list_lock);
  3155. qdf_spinlock_create(&soc->htt_stats.lock);
  3156. /* initialize work queue for stats processing */
  3157. qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
  3158. dp_reo_desc_deferred_freelist_create(soc);
  3159. dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
  3160. qdf_dma_mem_stats_read(),
  3161. qdf_heap_mem_stats_read(),
  3162. qdf_skb_total_mem_stats_read());
  3163. soc->vdev_stats_id_map = 0;
  3164. dp_soc_hw_txrx_stats_init(soc);
  3165. dp_soc_get_ap_mld_mode(soc);
  3166. return soc;
  3167. fail7:
  3168. dp_soc_tx_desc_sw_pools_deinit(soc);
  3169. fail6:
  3170. htt_soc_htc_dealloc(soc->htt_handle);
  3171. fail5:
  3172. dp_soc_srng_deinit(soc);
  3173. fail4:
  3174. dp_hw_link_desc_ring_deinit(soc);
  3175. fail3:
  3176. htt_htc_pkt_pool_free(htt_soc);
  3177. fail2:
  3178. htt_soc_detach(htt_soc);
  3179. fail1:
  3180. return NULL;
  3181. }
  3182. #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
  3183. static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
  3184. {
  3185. QDF_STATUS status;
  3186. if (soc->init_tcl_cmd_cred_ring) {
  3187. status = dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
  3188. TCL_CMD_CREDIT, 0, 0);
  3189. if (QDF_IS_STATUS_ERROR(status))
  3190. return status;
  3191. wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
  3192. soc->tcl_cmd_credit_ring.alloc_size,
  3193. soc->ctrl_psoc,
  3194. WLAN_MD_DP_SRNG_TCL_CMD,
  3195. "tcl_cmd_cred_ring");
  3196. }
  3197. return QDF_STATUS_SUCCESS;
  3198. }
  3199. static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
  3200. {
  3201. if (soc->init_tcl_cmd_cred_ring) {
  3202. wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
  3203. soc->tcl_cmd_credit_ring.alloc_size,
  3204. soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
  3205. "tcl_cmd_cred_ring");
  3206. dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
  3207. TCL_CMD_CREDIT, 0);
  3208. }
  3209. }
  3210. static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
  3211. {
  3212. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
  3213. uint32_t entries;
  3214. QDF_STATUS status;
  3215. entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
  3216. if (soc->init_tcl_cmd_cred_ring) {
  3217. status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
  3218. TCL_CMD_CREDIT, entries, 0);
  3219. if (QDF_IS_STATUS_ERROR(status))
  3220. return status;
  3221. }
  3222. return QDF_STATUS_SUCCESS;
  3223. }
  3224. static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
  3225. {
  3226. if (soc->init_tcl_cmd_cred_ring)
  3227. dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
  3228. }
  3229. inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
  3230. {
  3231. if (soc->init_tcl_cmd_cred_ring)
  3232. hal_tx_init_cmd_credit_ring(soc->hal_soc,
  3233. soc->tcl_cmd_credit_ring.hal_srng);
  3234. }
  3235. #else
  3236. static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
  3237. {
  3238. return QDF_STATUS_SUCCESS;
  3239. }
  3240. static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
  3241. {
  3242. }
  3243. static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
  3244. {
  3245. return QDF_STATUS_SUCCESS;
  3246. }
  3247. static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
  3248. {
  3249. }
  3250. inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
  3251. {
  3252. }
  3253. #endif
  3254. #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
  3255. static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
  3256. {
  3257. QDF_STATUS status;
  3258. status = dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
  3259. if (QDF_IS_STATUS_ERROR(status))
  3260. return status;
  3261. wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
  3262. soc->tcl_status_ring.alloc_size,
  3263. soc->ctrl_psoc,
  3264. WLAN_MD_DP_SRNG_TCL_STATUS,
  3265. "tcl_status_ring");
  3266. return QDF_STATUS_SUCCESS;
  3267. }
  3268. static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
  3269. {
  3270. wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
  3271. soc->tcl_status_ring.alloc_size,
  3272. soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
  3273. "tcl_status_ring");
  3274. dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  3275. }
  3276. static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
  3277. {
  3278. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
  3279. uint32_t entries;
  3280. QDF_STATUS status = QDF_STATUS_SUCCESS;
  3281. entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
  3282. status = dp_srng_alloc(soc, &soc->tcl_status_ring,
  3283. TCL_STATUS, entries, 0);
  3284. return status;
  3285. }
  3286. static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
  3287. {
  3288. dp_srng_free(soc, &soc->tcl_status_ring);
  3289. }
  3290. #else
  3291. static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
  3292. {
  3293. return QDF_STATUS_SUCCESS;
  3294. }
  3295. static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
  3296. {
  3297. }
  3298. static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
  3299. {
  3300. return QDF_STATUS_SUCCESS;
  3301. }
  3302. static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
  3303. {
  3304. }
  3305. #endif
  3306. /**
  3307. * dp_soc_srng_deinit() - de-initialize soc srng rings
  3308. * @soc: Datapath soc handle
  3309. *
  3310. */
  3311. void dp_soc_srng_deinit(struct dp_soc *soc)
  3312. {
  3313. uint32_t i;
  3314. if (soc->arch_ops.txrx_soc_srng_deinit)
  3315. soc->arch_ops.txrx_soc_srng_deinit(soc);
  3316. /* Free the ring memories */
  3317. /* Common rings */
  3318. wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
  3319. soc->wbm_desc_rel_ring.alloc_size,
  3320. soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
  3321. "wbm_desc_rel_ring");
  3322. dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  3323. dp_ssr_dump_srng_unregister("wbm_desc_rel_ring", -1);
  3324. /* Tx data rings */
  3325. for (i = 0; i < soc->num_tcl_data_rings; i++)
  3326. dp_deinit_tx_pair_by_index(soc, i);
  3327. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  3328. dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
  3329. dp_ipa_deinit_alt_tx_ring(soc);
  3330. }
  3331. /* TCL command and status rings */
  3332. dp_soc_tcl_cmd_cred_srng_deinit(soc);
  3333. dp_soc_tcl_status_srng_deinit(soc);
  3334. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3335. /* TODO: Get number of rings and ring sizes
  3336. * from wlan_cfg
  3337. */
  3338. dp_ssr_dump_srng_unregister("reo_dest_ring", i);
  3339. wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
  3340. soc->reo_dest_ring[i].alloc_size,
  3341. soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
  3342. "reo_dest_ring");
  3343. dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
  3344. }
  3345. dp_ssr_dump_srng_unregister("reo_reinject_ring", -1);
  3346. /* REO reinjection ring */
  3347. wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
  3348. soc->reo_reinject_ring.alloc_size,
  3349. soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
  3350. "reo_reinject_ring");
  3351. dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  3352. dp_ssr_dump_srng_unregister("rx_rel_ring", -1);
  3353. /* Rx release ring */
  3354. wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
  3355. soc->rx_rel_ring.alloc_size,
  3356. soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
  3357. "reo_release_ring");
  3358. dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  3359. /* Rx exception ring */
  3360. /* TODO: Better to store ring_type and ring_num in
  3361. * dp_srng during setup
  3362. */
  3363. dp_ssr_dump_srng_unregister("reo_exception_ring", -1);
  3364. wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
  3365. soc->reo_exception_ring.alloc_size,
  3366. soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
  3367. "reo_exception_ring");
  3368. dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  3369. /* REO command and status rings */
  3370. dp_ssr_dump_srng_unregister("reo_cmd_ring", -1);
  3371. wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
  3372. soc->reo_cmd_ring.alloc_size,
  3373. soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
  3374. "reo_cmd_ring");
  3375. dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  3376. dp_ssr_dump_srng_unregister("reo_status_ring", -1);
  3377. wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
  3378. soc->reo_status_ring.alloc_size,
  3379. soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
  3380. "reo_status_ring");
  3381. dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
  3382. }
  3383. /**
  3384. * dp_soc_srng_init() - Initialize soc level srng rings
  3385. * @soc: Datapath soc handle
  3386. *
  3387. * Return: QDF_STATUS_SUCCESS on success
  3388. * QDF_STATUS_E_FAILURE on failure
  3389. */
  3390. QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
  3391. {
  3392. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  3393. uint8_t i;
  3394. uint8_t wbm2_sw_rx_rel_ring_id;
  3395. soc_cfg_ctx = soc->wlan_cfg_ctx;
  3396. dp_enable_verbose_debug(soc);
  3397. /* WBM descriptor release ring */
  3398. if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
  3399. dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
  3400. goto fail1;
  3401. }
  3402. dp_ssr_dump_srng_register("wbm_desc_rel_ring",
  3403. &soc->wbm_desc_rel_ring, -1);
  3404. wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
  3405. soc->wbm_desc_rel_ring.alloc_size,
  3406. soc->ctrl_psoc,
  3407. WLAN_MD_DP_SRNG_WBM_DESC_REL,
  3408. "wbm_desc_rel_ring");
  3409. /* TCL command and status rings */
  3410. if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
  3411. dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
  3412. goto fail1;
  3413. }
  3414. if (dp_soc_tcl_status_srng_init(soc)) {
  3415. dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
  3416. goto fail1;
  3417. }
  3418. /* REO reinjection ring */
  3419. if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
  3420. dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
  3421. goto fail1;
  3422. }
  3423. dp_ssr_dump_srng_register("reo_reinject_ring",
  3424. &soc->reo_reinject_ring, -1);
  3425. wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
  3426. soc->reo_reinject_ring.alloc_size,
  3427. soc->ctrl_psoc,
  3428. WLAN_MD_DP_SRNG_REO_REINJECT,
  3429. "reo_reinject_ring");
  3430. wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
  3431. /* Rx release ring */
  3432. if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
  3433. wbm2_sw_rx_rel_ring_id, 0)) {
  3434. dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
  3435. goto fail1;
  3436. }
  3437. dp_ssr_dump_srng_register("rx_rel_ring", &soc->rx_rel_ring, -1);
  3438. wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
  3439. soc->rx_rel_ring.alloc_size,
  3440. soc->ctrl_psoc,
  3441. WLAN_MD_DP_SRNG_RX_REL,
  3442. "reo_release_ring");
  3443. /* Rx exception ring */
  3444. if (dp_srng_init(soc, &soc->reo_exception_ring,
  3445. REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
  3446. dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
  3447. goto fail1;
  3448. }
  3449. dp_ssr_dump_srng_register("reo_exception_ring",
  3450. &soc->reo_exception_ring, -1);
  3451. wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
  3452. soc->reo_exception_ring.alloc_size,
  3453. soc->ctrl_psoc,
  3454. WLAN_MD_DP_SRNG_REO_EXCEPTION,
  3455. "reo_exception_ring");
  3456. /* REO command and status rings */
  3457. if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
  3458. dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
  3459. goto fail1;
  3460. }
  3461. dp_ssr_dump_srng_register("reo_cmd_ring", &soc->reo_cmd_ring, -1);
  3462. wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
  3463. soc->reo_cmd_ring.alloc_size,
  3464. soc->ctrl_psoc,
  3465. WLAN_MD_DP_SRNG_REO_CMD,
  3466. "reo_cmd_ring");
  3467. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  3468. TAILQ_INIT(&soc->rx.reo_cmd_list);
  3469. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  3470. if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
  3471. dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
  3472. goto fail1;
  3473. }
  3474. dp_ssr_dump_srng_register("reo_status_ring", &soc->reo_status_ring, -1);
  3475. wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
  3476. soc->reo_status_ring.alloc_size,
  3477. soc->ctrl_psoc,
  3478. WLAN_MD_DP_SRNG_REO_STATUS,
  3479. "reo_status_ring");
  3480. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3481. if (dp_init_tx_ring_pair_by_index(soc, i))
  3482. goto fail1;
  3483. }
  3484. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  3485. if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
  3486. goto fail1;
  3487. if (dp_ipa_init_alt_tx_ring(soc))
  3488. goto fail1;
  3489. }
  3490. dp_create_ext_stats_event(soc);
  3491. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3492. /* Initialize REO destination ring */
  3493. if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
  3494. dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
  3495. goto fail1;
  3496. }
  3497. dp_ssr_dump_srng_register("reo_dest_ring",
  3498. &soc->reo_dest_ring[i], i);
  3499. wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
  3500. soc->reo_dest_ring[i].alloc_size,
  3501. soc->ctrl_psoc,
  3502. WLAN_MD_DP_SRNG_REO_DEST,
  3503. "reo_dest_ring");
  3504. }
  3505. if (soc->arch_ops.txrx_soc_srng_init) {
  3506. if (soc->arch_ops.txrx_soc_srng_init(soc)) {
  3507. dp_init_err("%pK: dp_srng_init failed for arch rings",
  3508. soc);
  3509. goto fail1;
  3510. }
  3511. }
  3512. return QDF_STATUS_SUCCESS;
  3513. fail1:
  3514. /*
  3515. * Cleanup will be done as part of soc_detach, which will
  3516. * be called on pdev attach failure
  3517. */
  3518. dp_soc_srng_deinit(soc);
  3519. return QDF_STATUS_E_FAILURE;
  3520. }
  3521. /**
  3522. * dp_soc_srng_free() - free soc level srng rings
  3523. * @soc: Datapath soc handle
  3524. *
  3525. */
  3526. void dp_soc_srng_free(struct dp_soc *soc)
  3527. {
  3528. uint32_t i;
  3529. if (soc->arch_ops.txrx_soc_srng_free)
  3530. soc->arch_ops.txrx_soc_srng_free(soc);
  3531. dp_srng_free(soc, &soc->wbm_desc_rel_ring);
  3532. for (i = 0; i < soc->num_tcl_data_rings; i++)
  3533. dp_free_tx_ring_pair_by_index(soc, i);
  3534. /* Free IPA rings for TCL_TX and TCL_COMPL ring */
  3535. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  3536. dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
  3537. dp_ipa_free_alt_tx_ring(soc);
  3538. }
  3539. dp_soc_tcl_cmd_cred_srng_free(soc);
  3540. dp_soc_tcl_status_srng_free(soc);
  3541. for (i = 0; i < soc->num_reo_dest_rings; i++)
  3542. dp_srng_free(soc, &soc->reo_dest_ring[i]);
  3543. dp_srng_free(soc, &soc->reo_reinject_ring);
  3544. dp_srng_free(soc, &soc->rx_rel_ring);
  3545. dp_srng_free(soc, &soc->reo_exception_ring);
  3546. dp_srng_free(soc, &soc->reo_cmd_ring);
  3547. dp_srng_free(soc, &soc->reo_status_ring);
  3548. }
  3549. /**
  3550. * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
  3551. * @soc: Datapath soc handle
  3552. *
  3553. * Return: QDF_STATUS_SUCCESS on success
  3554. * QDF_STATUS_E_NOMEM on failure
  3555. */
  3556. QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
  3557. {
  3558. uint32_t entries;
  3559. uint32_t i;
  3560. struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
  3561. uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
  3562. uint32_t reo_dst_ring_size;
  3563. soc_cfg_ctx = soc->wlan_cfg_ctx;
  3564. /* sw2wbm link descriptor release ring */
  3565. entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
  3566. if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
  3567. entries, 0)) {
  3568. dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
  3569. goto fail1;
  3570. }
  3571. /* TCL command and status rings */
  3572. if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
  3573. dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
  3574. goto fail1;
  3575. }
  3576. if (dp_soc_tcl_status_srng_alloc(soc)) {
  3577. dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
  3578. goto fail1;
  3579. }
  3580. /* REO reinjection ring */
  3581. entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
  3582. if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
  3583. entries, 0)) {
  3584. dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
  3585. goto fail1;
  3586. }
  3587. /* Rx release ring */
  3588. entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
  3589. if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
  3590. entries, 0)) {
  3591. dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
  3592. goto fail1;
  3593. }
  3594. /* Rx exception ring */
  3595. entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
  3596. if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
  3597. entries, 0)) {
  3598. dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
  3599. goto fail1;
  3600. }
  3601. /* REO command and status rings */
  3602. entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
  3603. if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
  3604. dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
  3605. goto fail1;
  3606. }
  3607. entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
  3608. if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
  3609. entries, 0)) {
  3610. dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
  3611. goto fail1;
  3612. }
  3613. reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
  3614. /* Disable cached desc if NSS offload is enabled */
  3615. if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
  3616. cached = 0;
  3617. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  3618. if (dp_alloc_tx_ring_pair_by_index(soc, i))
  3619. goto fail1;
  3620. }
  3621. /* IPA rings for TCL_TX and TX_COMP will be allocated here */
  3622. if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
  3623. if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
  3624. goto fail1;
  3625. if (dp_ipa_alloc_alt_tx_ring(soc))
  3626. goto fail1;
  3627. }
  3628. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  3629. /* Setup REO destination ring */
  3630. if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
  3631. reo_dst_ring_size, cached)) {
  3632. dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
  3633. goto fail1;
  3634. }
  3635. }
  3636. if (soc->arch_ops.txrx_soc_srng_alloc) {
  3637. if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
  3638. dp_init_err("%pK: dp_srng_alloc failed for arch rings",
  3639. soc);
  3640. goto fail1;
  3641. }
  3642. }
  3643. return QDF_STATUS_SUCCESS;
  3644. fail1:
  3645. dp_soc_srng_free(soc);
  3646. return QDF_STATUS_E_NOMEM;
  3647. }
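/*
 * Illustrative lifecycle sketch (compiled out, not part of the driver):
 * the expected pairing of the soc SRNG helpers in this file. The
 * wrapper name is hypothetical; note that dp_soc_srng_init() already
 * de-initializes the rings internally on failure, so only the free is
 * needed in that error path.
 */
#if 0
static QDF_STATUS dp_soc_srng_lifecycle_sketch(struct dp_soc *soc)
{
	if (dp_soc_srng_alloc(soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_NOMEM;

	if (dp_soc_srng_init(soc) != QDF_STATUS_SUCCESS) {
		dp_soc_srng_free(soc);
		return QDF_STATUS_E_FAILURE;
	}

	/* ... datapath runs ... */

	dp_soc_srng_deinit(soc);
	dp_soc_srng_free(soc);
	return QDF_STATUS_SUCCESS;
}
#endif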
  3648. /**
  3649. * dp_soc_cfg_attach() - set target specific configuration in
  3650. * dp soc cfg.
  3651. * @soc: dp soc handle
  3652. */
  3653. void dp_soc_cfg_attach(struct dp_soc *soc)
  3654. {
  3655. int target_type;
  3656. int nss_cfg = 0;
  3657. target_type = hal_get_target_type(soc->hal_soc);
  3658. switch (target_type) {
  3659. case TARGET_TYPE_QCA6290:
  3660. wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
  3661. REO_DST_RING_SIZE_QCA6290);
  3662. break;
  3663. case TARGET_TYPE_QCA6390:
  3664. case TARGET_TYPE_QCA6490:
  3665. case TARGET_TYPE_QCA6750:
  3666. wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
  3667. REO_DST_RING_SIZE_QCA6290);
  3668. soc->wlan_cfg_ctx->rxdma1_enable = 0;
  3669. break;
  3670. case TARGET_TYPE_KIWI:
  3671. case TARGET_TYPE_MANGO:
  3672. case TARGET_TYPE_PEACH:
  3673. soc->wlan_cfg_ctx->rxdma1_enable = 0;
  3674. break;
  3675. case TARGET_TYPE_QCA8074:
  3676. wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
  3677. break;
  3678. case TARGET_TYPE_QCA8074V2:
  3679. case TARGET_TYPE_QCA6018:
  3680. case TARGET_TYPE_QCA9574:
  3681. case TARGET_TYPE_QCN6122:
  3682. case TARGET_TYPE_QCA5018:
  3683. wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
  3684. wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
  3685. break;
  3686. case TARGET_TYPE_QCN9160:
  3687. wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
  3688. soc->wlan_cfg_ctx->rxdma1_enable = 0;
  3689. break;
  3690. case TARGET_TYPE_QCN9000:
  3691. wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
  3692. wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
  3693. break;
  3694. case TARGET_TYPE_QCN9224:
  3695. case TARGET_TYPE_QCA5332:
  3696. case TARGET_TYPE_QCN6432:
  3697. wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
  3698. wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
  3699. break;
  3700. default:
  3701. qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
  3702. qdf_assert_always(0);
  3703. break;
  3704. }
  3705. if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
  3706. nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
  3707. wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
  3708. if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
  3709. wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
  3710. wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
  3711. wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
  3712. wlan_cfg_set_num_tx_spl_desc(soc->wlan_cfg_ctx, 0);
  3713. wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
  3714. soc->init_tcl_cmd_cred_ring = false;
  3715. soc->num_tcl_data_rings =
  3716. wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
  3717. soc->num_reo_dest_rings =
  3718. wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
  3719. } else {
  3720. soc->init_tcl_cmd_cred_ring = true;
  3721. soc->num_tx_comp_rings =
  3722. wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
  3723. soc->num_tcl_data_rings =
  3724. wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  3725. soc->num_reo_dest_rings =
  3726. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  3727. }
  3728. }
  3729. void dp_pdev_set_default_reo(struct dp_pdev *pdev)
  3730. {
  3731. struct dp_soc *soc = pdev->soc;
  3732. switch (pdev->pdev_id) {
  3733. case 0:
  3734. pdev->reo_dest =
  3735. wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
  3736. break;
  3737. case 1:
  3738. pdev->reo_dest =
  3739. wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
  3740. break;
  3741. case 2:
  3742. pdev->reo_dest =
  3743. wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
  3744. break;
  3745. default:
  3746. dp_init_err("%pK: Invalid pdev_id %d for reo selection",
  3747. soc, pdev->pdev_id);
  3748. break;
  3749. }
  3750. }
  3751. #ifdef WLAN_SUPPORT_DPDK
  3752. void dp_soc_reset_dpdk_intr_mask(struct dp_soc *soc)
  3753. {
  3754. uint8_t j;
  3755. uint8_t *grp_mask = NULL;
  3756. int group_number, mask, num_ring;
  3757. /* number of TX rings */
  3758. num_ring = soc->num_tcl_data_rings;
  3759. /*
  3760. * group mask for tx completion ring.
  3761. */
  3762. grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
  3763. for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
  3764. /*
  3765. * Group number corresponding to tx offloaded ring.
  3766. */
  3767. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  3768. if (group_number < 0) {
  3769. dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
  3770. soc, WBM2SW_RELEASE, j);
  3771. continue;
  3772. }
  3773. mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx,
  3774. group_number);
  3775. /* reset the tx mask for offloaded ring */
  3776. mask &= (~(1 << j));
  3777. /*
  3778. * reset the interrupt mask for offloaded ring.
  3779. */
  3780. wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx,
  3781. group_number, mask);
  3782. }
  3783. /* number of rx rings */
  3784. num_ring = soc->num_reo_dest_rings;
  3785. /*
  3786. * group mask for reo destination ring.
  3787. */
  3788. grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
  3789. for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
  3790. /*
  3791. * Group number corresponding to rx offloaded ring.
  3792. */
  3793. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  3794. if (group_number < 0) {
  3795. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  3796. soc, REO_DST, j);
  3797. continue;
  3798. }
  3799. mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx,
  3800. group_number);
  3801. /* reset the interrupt mask for offloaded ring */
  3802. mask &= (~(1 << j));
  3803. /*
  3804. * set the interrupt mask to zero for rx offloaded radio.
  3805. */
  3806. wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx,
  3807. group_number, mask);
  3808. }
  3809. /*
  3810. * group mask for Rx buffer refill ring
  3811. */
  3812. grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
  3813. for (j = 0; j < MAX_PDEV_CNT; j++) {
  3814. int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
  3815. /*
  3816. * Group number corresponding to rx offloaded ring.
  3817. */
  3818. group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
  3819. if (group_number < 0) {
  3820. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  3821. soc, REO_DST, lmac_id);
  3822. continue;
  3823. }
  3824. /* get the interrupt mask and clear the bit for the offloaded ring */
  3825. mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  3826. group_number);
  3827. mask &= (~(1 << lmac_id));
  3828. /*
  3829. * set the interrupt mask to zero for rx offloaded radio.
  3830. */
  3831. wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
  3832. group_number, mask);
  3833. }
  3834. grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
  3835. for (j = 0; j < num_ring; j++) {
  3836. /*
  3837. * Group number corresponding to rx err ring.
  3838. */
  3839. group_number = dp_srng_find_ring_in_mask(j, grp_mask);
  3840. if (group_number < 0) {
  3841. dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
  3842. soc, REO_EXCEPTION, j);
  3843. continue;
  3844. }
  3845. wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
  3846. group_number, 0);
  3847. }
  3848. }
  3849. #endif
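/*
 * Illustrative sketch (compiled out, not part of the driver): the mask
 * manipulation used throughout dp_soc_reset_dpdk_intr_mask(), i.e.
 * clearing one ring's bit out of an interrupt group mask. The helper
 * name is hypothetical.
 */
#if 0
static int dp_clear_ring_mask_bit_sketch(int mask, int ring_num)
{
	/* e.g. mask = 0x7, ring_num = 1 -> returns 0x5 */
	return mask & ~(1 << ring_num);
}
#endif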