dp_main.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"

#define DP_INTR_POLL_TIMER_MS	10
#define DP_MCS_LENGTH		(6*MAX_MCS)
#define DP_NSS_LENGTH		(6*SS_COUNT)
#define DP_RXDMA_ERR_LENGTH	(6*MAX_RXDMA_ERRORS)
#define DP_REO_ERR_LENGTH	(6*REO_ERROR_TYPE_MAX)
#define DP_CURR_FW_STATS_AVAIL	19
#define DP_HTT_DBG_EXT_STATS_MAX 256
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP   TID  AC
 * 000000  0   WME_AC_BE
 * 001000  1   WME_AC_BK
 * 010000  1   WME_AC_BK
 * 011000  0   WME_AC_BE
 * 100000  5   WME_AC_VI
 * 101000  5   WME_AC_VI
 * 110000  6   WME_AC_VO
 * 111000  6   WME_AC_VO
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6,
};
/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID = -1,
};

/**
 * @brief Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
};
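
/*
 * Note: each row above is indexed by the generic stats id requested from
 * the control path; column STATS_FW holds the matching HTT extended FW
 * stats type and column STATS_HOST the host stats type, with the
 * TXRX_*_STATS_INVALID markers flagging ids that have no counterpart of
 * that kind. (Editorial note inferred from the table layout.)
 */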
/**
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num, int mac_id, uint32_t num_entries)
{
	void *hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
		soc->osdev, soc->osdev->dev, srng->alloc_size,
		&(srng->base_paddr_unaligned));

	if (!srng->base_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("alloc failed - ring_type: %d, ring_num %d"),
			ring_type, ring_num);
		return QDF_STATUS_E_NOMEM;
	}
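
	/* Editorial note: the base-address adjustment below only yields an
	 * aligned ring base when the DMA-coherent allocation is itself
	 * already aligned to ring_base_align, which qdf_mem_alloc_consistent()
	 * is expected to provide in practice; the extra ring_base_align - 1
	 * bytes added to alloc_size leave room for the adjustment.
	 */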
	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
		((unsigned long)(ring_params.ring_base_vaddr) -
		(unsigned long)srng->base_vaddr_unaligned);
	ring_params.num_entries = num_entries;

	/* TODO: Check MSI support and get MSI settings from HIF layer */
	ring_params.msi_data = 0;
	ring_params.msi_addr = 0;

	/* TODO: Setup interrupt timer and batch counter thresholds for
	 * interrupt mitigation based on ring type
	 */
	ring_params.intr_timer_thres_us = 8;
	ring_params.intr_batch_cntr_thres_entries = 1;

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to be passed from DP layer
	 */
	ring_params.flags = 0;

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params.low_threshold = num_entries >> 3;
		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
		mac_id, &ring_params);
	return 0;
}
/**
 * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
 * Any buffers allocated and attached to ring entries are expected to be freed
 * before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("Ring type: %d, num:%d not setup"),
			ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}
/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(void *hif_handle);

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle
 * @budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	uint32_t budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
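
	/* Each handler below returns the amount of work it completed; the
	 * remaining budget is carried forward to the next ring so that a
	 * single invocation never processes more than dp_budget entries
	 * overall.
	 */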
	/* Process Tx completion interrupts first to return back buffers */
	if (tx_mask) {
		for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
			if (tx_mask & (1 << ring)) {
				work_done =
					dp_tx_comp_handler(soc, ring, budget);
				budget -= work_done;
				if (work_done)
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_INFO,
						"tx mask 0x%x ring %d, budget %d",
						tx_mask, ring, budget);
				if (budget <= 0)
					goto budget_done;
			}
		}
	}
	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(soc,
				soc->reo_exception_ring.hal_srng, budget);
		budget -= work_done;

		if (work_done)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"REO Exception Ring: work_done %d budget %d",
				work_done, budget);
		if (budget <= 0) {
			goto budget_done;
		}
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(soc,
				soc->rx_rel_ring.hal_srng, budget);
		budget -= work_done;

		if (work_done)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"WBM Release Ring: work_done %d budget %d",
				work_done, budget);
		if (budget <= 0) {
			goto budget_done;
		}
	}
	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (rx_mask & (1 << ring)) {
				work_done =
					dp_rx_process(int_ctx,
					    soc->reo_dest_ring[ring].hal_srng,
					    budget);
				budget -= work_done;
				if (work_done)
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_INFO,
						"rx mask 0x%x ring %d, budget %d",
						rx_mask, ring, budget);
				if (budget <= 0)
					goto budget_done;
			}
		}
	}
	if (reo_status_mask)
		dp_reo_status_ring_handler(soc);

	/* Process Rx monitor interrupts */
	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
			work_done =
				dp_mon_process(soc, ring, budget);
			budget -= work_done;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);

budget_done:
	return dp_budget - budget;
}
/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
#ifdef DP_INTR_POLL_BASED
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	int i;

	if (qdf_atomic_read(&soc->cmn_init_done)) {
		for (i = 0;
			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_service_srngs(&soc->intr_ctx[i], 0xffff);

		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
}
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_mon_ring_mask to indicate the rings that are processed by the handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0xF;
		soc->intr_ctx[i].rx_ring_mask = 0xF;
		soc->intr_ctx[i].rx_mon_ring_mask = 0x1;
		soc->intr_ctx[i].rx_err_ring_mask = 0x1;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0x1;
		soc->intr_ctx[i].reo_status_ring_mask = 0x1;
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}
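
	/* In poll mode every context is armed with the full set of ring
	 * masks; the timer below then drives dp_service_srngs() for all
	 * contexts every DP_INTR_POLL_TIMER_MS instead of relying on HW
	 * interrupts.
	 */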
	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
 * @txrx_soc: DP SOC handle
 *
 * Return: void
 */
static void dp_soc_interrupt_detach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	qdf_timer_stop(&soc->int_timer);
	qdf_timer_free(&soc->int_timer);
}
#else
/*
 * dp_soc_interrupt_attach() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_mon_ring_mask to indicate the rings that are processed by the handler.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i = 0;
	int num_irq = 0;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		int j = 0;
		int ret = 0;

		/* Map of IRQ ids registered with one interrupt context */
		int irq_id_map[HIF_MAX_GRP_IRQ];

		int tx_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		int rx_mon_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);

		soc->intr_ctx[i].tx_ring_mask = tx_mask;
		soc->intr_ctx[i].rx_ring_mask = rx_mask;
		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
		soc->intr_ctx[i].soc = soc;

		num_irq = 0;
		for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
			if (tx_mask & (1 << j)) {
				irq_id_map[num_irq++] =
					(wbm2host_tx_completions_ring1 - j);
			}

			if (rx_mask & (1 << j)) {
				irq_id_map[num_irq++] =
					(reo2host_destination_ring1 - j);
			}

			if (rx_mon_mask & (1 << j)) {
				irq_id_map[num_irq++] =
					(rxdma2host_monitor_destination_mac1
					- j);
			}
		}
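
		/* irq_id_map now holds the HW IRQ lines (wbm2host tx
		 * completion, reo2host destination and rxdma2host monitor
		 * destination) selected by this context's ring masks; they
		 * are registered with HIF as one external interrupt group
		 * serviced by dp_service_srngs().
		 */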
		ret = hif_register_ext_group_int_handler(soc->hif_handle,
				num_irq, irq_id_map,
				dp_service_srngs,
				&soc->intr_ctx[i]);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("failed, ret = %d"), ret);
			return QDF_STATUS_E_FAILURE;
		}
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	hif_configure_ext_group_interrupts(soc->hif_handle);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
 * @txrx_soc: DP SOC handle
 *
 * Return: void
 */
static void dp_soc_interrupt_detach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}
}
#endif
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4

/*
 * Allocate and setup link descriptor pool that will be used by HW for
 * various link and queue descriptors and managed by WBM
 */
static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
{
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc =
		hal_num_mpdus_per_link_desc(soc->hal_soc);
	uint32_t num_msdus_per_link_desc =
		hal_num_msdus_per_link_desc(soc->hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_link_desc_banks;
	uint32_t last_bank_size = 0;
	uint32_t entry_size, num_entries;
	int i;
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included because they are expected to be
	 * allocated contiguously with REO queue descriptors.
	 */
	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

	num_mpdu_queue_descs = num_mpdu_link_descs /
		num_mpdu_links_per_queue_desc;

	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
		num_msdus_per_link_desc;

	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
		num_tx_msdu_link_descs + num_rx_msdu_link_descs;

	/* Round up to power of 2 */
	total_link_descs = 1;
	while (total_link_descs < num_entries)
		total_link_descs <<= 1;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("total_link_descs: %u, link_desc_size: %d"),
		total_link_descs, link_desc_size);
	total_mem_size = total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	if (total_mem_size <= max_alloc_size) {
		num_link_desc_banks = 0;
		last_bank_size = total_mem_size;
	} else {
		num_link_desc_banks = (total_mem_size) /
			(max_alloc_size - link_desc_align);
		last_bank_size = total_mem_size %
			(max_alloc_size - link_desc_align);
	}
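
	/* The pool is carved into banks of at most max_alloc_size bytes
	 * (less alignment slack); any remainder is allocated below as a
	 * final, smaller bank.
	 */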
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("total_mem_size: %d, num_link_desc_banks: %u"),
		total_mem_size, num_link_desc_banks);

	for (i = 0; i < num_link_desc_banks; i++) {
		soc->link_desc_banks[i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
			max_alloc_size,
			&(soc->link_desc_banks[i].base_paddr_unaligned));
		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Link descriptor memory alloc failed"));
			goto fail;
		}
		soc->link_desc_banks[i].size = max_alloc_size;
		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) +
			((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align));
		soc->link_desc_banks[i].base_paddr = (unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));
	}
	if (last_bank_size) {
		/* Allocate last bank in case total memory required is not
		 * exact multiple of max_alloc_size
		 */
		soc->link_desc_banks[i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
			last_bank_size,
			&(soc->link_desc_banks[i].base_paddr_unaligned));
		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Link descriptor memory alloc failed"));
			goto fail;
		}
		soc->link_desc_banks[i].size = last_bank_size;
		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
			(soc->link_desc_banks[i].base_vaddr_unaligned) +
			((unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned) %
			link_desc_align));
		soc->link_desc_banks[i].base_paddr =
			(unsigned long)(
			soc->link_desc_banks[i].base_paddr_unaligned) +
			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
			(unsigned long)(
			soc->link_desc_banks[i].base_vaddr_unaligned));
	}
	/* Allocate and setup link descriptor idle list for HW internal use */
	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
	total_mem_size = entry_size * total_link_descs;

	if (total_mem_size <= max_alloc_size) {
		void *desc;

		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("Link desc idle ring setup failed"));
			goto fail;
		}

		hal_srng_access_start_unlocked(soc->hal_soc,
			soc->wbm_idle_link_ring.hal_srng);
		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_entries = (soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned)))
				/ link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);

			while (num_entries && (desc = hal_srng_src_get_next(
				soc->hal_soc,
				soc->wbm_idle_link_ring.hal_srng))) {
				hal_set_link_desc_addr(desc, i, paddr);
				num_entries--;
				paddr += link_desc_size;
			}
		}
		hal_srng_access_end_unlocked(soc->hal_soc,
			soc->wbm_idle_link_ring.hal_srng);
	} else {
		uint32_t num_scatter_bufs;
		uint32_t num_entries_per_buf;
		uint32_t rem_entries;
		uint8_t *scatter_buf_ptr;
		uint16_t scatter_buf_num;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = total_mem_size /
			soc->wbm_idle_scatter_buf_size;
		if (total_mem_size % soc->wbm_idle_scatter_buf_size)
			num_scatter_bufs++;
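
		/* When the idle list does not fit in a single allocation,
		 * HW is instead given a scatter list: a set of DMA buffers,
		 * each holding num_entries_per_buf link descriptor pointers,
		 * programmed via hal_setup_link_idle_list() below.
		 */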
		for (i = 0; i < num_scatter_bufs; i++) {
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
				soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("Scatter list memory alloc failed"));
				goto fail;
			}
		}
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;

		for (i = 0; i < MAX_LINK_DESC_BANKS &&
			soc->link_desc_banks[i].base_paddr; i++) {
			uint32_t num_link_descs =
				(soc->link_desc_banks[i].size -
				((unsigned long)(
				soc->link_desc_banks[i].base_vaddr) -
				(unsigned long)(
				soc->link_desc_banks[i].base_vaddr_unaligned))) /
				link_desc_size;
			unsigned long paddr = (unsigned long)(
				soc->link_desc_banks[i].base_paddr);
			void *desc = NULL;

			while (num_link_descs && (desc =
				hal_srng_src_get_next(soc->hal_soc,
				soc->wbm_idle_link_ring.hal_srng))) {
				hal_set_link_desc_addr((void *)scatter_buf_ptr,
					i, paddr);
				num_link_descs--;
				paddr += link_desc_size;
				if (rem_entries) {
					rem_entries--;
					scatter_buf_ptr += link_desc_size;
				} else {
					rem_entries = num_entries_per_buf;
					scatter_buf_num++;
					scatter_buf_ptr = (uint8_t *)(
						soc->wbm_idle_scatter_buf_base_vaddr[
						scatter_buf_num]);
				}
			}
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num])));
	}
	return 0;
fail:
	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0);
	}

	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
		}
	}
	return QDF_STATUS_E_FAILURE;
}
#ifdef notused
/*
 * Free the link descriptor pool that was set up for HW use
 */
static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
{
	int i;

	if (soc->wbm_idle_link_ring.hal_srng) {
		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
			WBM_IDLE_LINK, 0);
	}

	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->wbm_idle_scatter_buf_size,
				soc->wbm_idle_scatter_buf_base_vaddr[i],
				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
		}
	}

	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				soc->link_desc_banks[i].size,
				soc->link_desc_banks[i].base_vaddr_unaligned,
				soc->link_desc_banks[i].base_paddr_unaligned,
				0);
		}
	}
}
#endif /* notused */
/* TODO: Following should be configurable */
#define WBM_RELEASE_RING_SIZE 64
#define TCL_DATA_RING_SIZE 512
#define TX_COMP_RING_SIZE 1024
#define TCL_CMD_RING_SIZE 32
#define TCL_STATUS_RING_SIZE 32
#define REO_DST_RING_SIZE 2048
#define REO_REINJECT_RING_SIZE 32
#define RX_RELEASE_RING_SIZE 1024
#define REO_EXCEPTION_RING_SIZE 128
#define REO_CMD_RING_SIZE 32
#define REO_STATUS_RING_SIZE 32
#define RXDMA_BUF_RING_SIZE 1024
#define RXDMA_REFILL_RING_SIZE 2048
#define RXDMA_MONITOR_BUF_RING_SIZE 1024
#define RXDMA_MONITOR_DST_RING_SIZE 1024
#define RXDMA_MONITOR_STATUS_RING_SIZE 1024
#define RXDMA_MONITOR_DESC_RING_SIZE 1024
/*
 * dp_soc_cmn_setup() - Common SoC level initialization
 * @soc: Datapath SOC handle
 *
 * This is an internal function used to setup common SOC data structures,
 * to be called from PDEV attach after receiving HW mode capabilities from FW
 */
static int dp_soc_cmn_setup(struct dp_soc *soc)
{
	int i;
	struct hal_reo_params reo_params;

	if (qdf_atomic_read(&soc->cmn_init_done))
		return 0;

	if (dp_peer_find_attach(soc))
		goto fail0;

	if (dp_hw_link_desc_pool_setup(soc))
		goto fail1;

	/* Setup SRNG rings */
	/* Common rings */
	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
		WBM_RELEASE_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
		goto fail1;
	}
	soc->num_tcl_data_rings = 0;
	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i, 0, TCL_DATA_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
				goto fail1;
			}
			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i, 0, TX_COMP_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_tcl_data_rings = 0;
	}
	if (dp_tx_soc_attach(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_tx_soc_attach failed"));
		goto fail1;
	}

	/* TCL command and status rings */
	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
		TCL_CMD_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_cmd_ring"));
		goto fail1;
	}

	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
		TCL_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for tcl_status_ring"));
		goto fail1;
	}

	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
	 * descriptors
	 */
	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		QDF_TRACE(QDF_MODULE_ID_DP,
			QDF_TRACE_LEVEL_ERROR,
			FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
				i, 0, REO_DST_RING_SIZE)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
				goto fail1;
			}
		}
	} else {
		/* This will be incremented during per pdev ring setup */
		soc->num_reo_dest_rings = 0;
	}

	/* TBD: call dp_rx_init to setup Rx SW descriptors */
	/* REO reinjection ring */
	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
		REO_REINJECT_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_reinject_ring"));
		goto fail1;
	}

	/* Rx release ring */
	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
		RX_RELEASE_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for rx_rel_ring"));
		goto fail1;
	}

	/* Rx exception ring */
	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_exception_ring"));
		goto fail1;
	}

	/* REO command and status rings */
	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
		REO_CMD_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_cmd_ring"));
		goto fail1;
	}

	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);

	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
		REO_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for reo_status_ring"));
		goto fail1;
	}
	/* Setup HW REO */
	qdf_mem_zero(&reo_params, sizeof(reo_params));

	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx))
		reo_params.rx_hash_enabled = true;

	hal_reo_setup(soc->hal_soc, &reo_params);
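
	/* With rx_hash_enabled set, REO appears to select the destination
	 * ring per flow via hashing instead of steering everything to a
	 * single default ring; see also the Toeplitz seeds configured in
	 * dp_lro_hash_setup(). (Editorial note.)
	 */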
	qdf_atomic_set(&soc->cmn_init_done, 1);
	qdf_nbuf_queue_init(&soc->htt_stats_msg);
	return 0;

fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
fail0:
	return QDF_STATUS_E_FAILURE;
}
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);

static void dp_lro_hash_setup(struct dp_soc *soc)
{
	struct cdp_lro_hash_config lro_hash;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
		!wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("LRO disabled and RX hash disabled"));
		return;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}
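
	/* Under tcp_flag_mask, a segment is eligible for LRO only when its
	 * flag bits match tcp_flag exactly, i.e. pure ACKs with none of
	 * SYN/FIN/RST/URG/ECE/CWR set.
	 */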
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, FL("enabled"));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
		(sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
		LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
		(sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
		LRO_IPV6_SEED_ARR_SZ));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"lro_hash: lro_enable: 0x%x"
		" tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		FL("lro_hash: toeplitz_hash_ipv4:"));
	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
		QDF_TRACE_LEVEL_ERROR,
		(void *)lro_hash.toeplitz_hash_ipv4,
		(sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
		LRO_IPV4_SEED_ARR_SZ));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		FL("lro_hash: toeplitz_hash_ipv6:"));
	qdf_trace_hex_dump(QDF_MODULE_ID_DP,
		QDF_TRACE_LEVEL_ERROR,
		(void *)lro_hash.toeplitz_hash_ipv6,
		(sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
		LRO_IPV6_SEED_ARR_SZ));

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
	if (soc->cdp_soc.ol_ops->lro_hash_config)
		(void)soc->cdp_soc.ol_ops->lro_hash_config
			(soc->osif_soc, &lro_hash);
}
/*
 * dp_rxdma_ring_setup() - configure the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: 0 - success, non-zero - failure
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static int dp_rxdma_ring_setup(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
	int max_mac_rings =
		wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	int i;

	for (i = 0; i < max_mac_rings; i++) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: pdev_id %d mac_id %d\n",
			__func__, pdev->pdev_id, i);
		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
			RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_ERROR,
				FL("failed rx mac ring setup"));
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}
#else
static int dp_rxdma_ring_setup(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
 * @pdev: DP_PDEV handle
 *
 * Return: void
 */
static inline void
dp_dscp_tid_map_setup(struct dp_pdev *pdev)
{
	uint8_t map_id;

	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
		qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
			sizeof(default_dscp_tid_map));
	}
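
	/* All DP_MAX_TID_MAPS SW tables start out as copies of the default
	 * map; only the first HAL_MAX_HW_DSCP_TID_MAPS of them are
	 * programmed into HW below.
	 */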
	for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
		hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
				pdev->dscp_tid_map[map_id],
				map_id);
	}
}
/*
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @ctrl_pdev: Opaque PDEV handle from OSIF/HDD
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: PDEV ID
 *
 * Return: DP PDEV handle on success, NULL on failure
 */
static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
	struct cdp_cfg *ctrl_pdev,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP PDEV memory allocation failed"));
		goto fail0;
	}

	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
	if (!pdev->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("pdev cfg_attach failed"));
		qdf_mem_free(pdev);
		goto fail0;
	}

	/*
	 * set nss pdev config based on soc config
	 * (use the pdev_id argument here: pdev->pdev_id is not yet assigned)
	 */
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
		(wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx) &
		(1 << pdev_id)));

	pdev->soc = soc;
	pdev->osif_pdev = ctrl_pdev;
	pdev->pdev_id = pdev_id;
	soc->pdev_list[pdev_id] = pdev;
	soc->pdev_count++;

	TAILQ_INIT(&pdev->vdev_list);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
	TAILQ_INIT(&pdev->neighbour_peers_list);

	if (dp_soc_cmn_setup(soc)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_soc_cmn_setup failed"));
		goto fail1;
	}
	/* Setup per PDEV TCL rings if configured */
	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
			pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for tcl_data_ring"));
			goto fail1;
		}
		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
			WBM2SW_RELEASE, pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for tx_comp_ring"));
			goto fail1;
		}
		soc->num_tcl_data_rings++;
	}
	/* Tx specific init */
	if (dp_tx_pdev_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_tx_pdev_attach failed"));
		goto fail1;
	}

	/* Setup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("dp_srng_setup failed for reo_dest_ring"));
			goto fail1;
		}
		soc->num_reo_dest_rings++;
	}

	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
		RXDMA_REFILL_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed rx refill ring"));
		goto fail1;
	}
	if (dp_rxdma_ring_setup(soc, pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("RXDMA ring config failed"));
		goto fail1;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
		pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
		goto fail1;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
		pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
		goto fail1;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
		RXDMA_MONITOR_STATUS, 0, pdev_id,
		RXDMA_MONITOR_STATUS_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_srng_setup failed for rxdma_mon_status_ring"));
		goto fail1;
	}

	if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
		RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"dp_srng_setup failed for rxdma_mon_desc_ring\n");
		goto fail1;
	}
	/* Rx specific init */
	if (dp_rx_pdev_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_rx_pdev_attach failed"));
		goto fail0;
	}

	DP_STATS_INIT(pdev);

#ifndef CONFIG_WIN
	/* MCL */
	dp_local_peer_id_pool_init(pdev);
#endif

	dp_dscp_tid_map_setup(pdev);

	/* Rx monitor mode specific init */
	if (dp_rx_pdev_mon_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("dp_rx_pdev_mon_attach failed"));
		goto fail0;
	}

	/* set the reo destination to 1 during initialization */
	pdev->reo_dest = 1;

	return (struct cdp_pdev *)pdev;

fail1:
	dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);

fail0:
	return NULL;
}
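
/*
 * Error-path note: fail1 unwinds the rings and Tx state already set up
 * above via dp_pdev_detach_wifi3(), while the two Rx attach failures jump
 * to fail0 and only return NULL, leaving the earlier ring setup to be
 * reclaimed by a later soc detach (the pdev is already in soc->pdev_list
 * at that point). Either way, callers must treat a NULL return as
 * "pdev not attached".
 */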

/*
 * dp_rxdma_ring_cleanup() - cleanup the RX DMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
	int max_mac_rings =
		wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	int i;

	max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
		max_mac_rings : MAX_RX_MAC_RINGS;

	for (i = 0; i < max_mac_rings; i++)
		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
			RXDMA_BUF, 1);
}
#else
static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
	struct dp_pdev *pdev)
{
}
#endif
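
/*
 * When QCA_HOST2FW_RXBUF_RING is defined, dp_rxdma_ring_cleanup() is the
 * teardown counterpart of the per-MAC rx_mac_buf_ring programming done in
 * dp_rxdma_ring_config() below; the empty stub keeps the call sites
 * unconditional in both builds.
 */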

/*
 * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
 * @pdev: device object
 *
 * Return: void
 */
static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
{
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;

	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
			neighbour_peer_list_elem, temp_peer) {
		/* delete this peer from the list */
		TAILQ_REMOVE(&pdev->neighbour_peers_list,
			peer, neighbour_peer_list_elem);
		qdf_mem_free(peer);
	}

	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
}

/*
 * dp_pdev_detach_wifi3() - detach txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force detach
 *
 */
static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	dp_tx_pdev_detach(pdev);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
			TCL_DATA, pdev->pdev_id);
		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
			WBM2SW_RELEASE, pdev->pdev_id);
	}

	dp_rx_pdev_detach(pdev);
	dp_rx_pdev_mon_detach(pdev);
	dp_neighbour_peers_detach(pdev);

	/* Cleanup per PDEV REO rings if configured */
	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
			REO_DST, pdev->pdev_id);
	}

	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);

	dp_rxdma_ring_cleanup(soc, pdev);

	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);

	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);

	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
		RXDMA_MONITOR_STATUS, 0);

	dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
		RXDMA_MONITOR_DESC, 0);

	soc->pdev_list[pdev->pdev_id] = NULL;
	soc->pdev_count--;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);

	qdf_mem_free(pdev);
}

/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
			(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}

/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle
 *
 */
static void dp_soc_detach_wifi3(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	qdf_atomic_set(&soc->cmn_init_done, 0);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		if (soc->pdev_list[i])
			dp_pdev_detach_wifi3(
				(struct cdp_pdev *)soc->pdev_list[i], 1);
	}

	dp_peer_find_detach(soc);

	/* TBD: Call Tx and Rx cleanup functions to free buffers and
	 * SW descriptors
	 */

	/* Free the ring memories */

	/* Common rings */
	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	dp_tx_soc_detach(soc);

	/* Tx data rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
				TCL_DATA, i);
			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
				WBM2SW_RELEASE, i);
		}
	}

	/* TCL command and status rings */
	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	/* Rx data rings */
	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
		for (i = 0; i < soc->num_reo_dest_rings; i++) {
			/* TODO: Get number of rings and ring sizes
			 * from wlan_cfg
			 */
			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
				REO_DST, i);
		}
	}

	/* REO reinjection ring */
	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);

	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
	qdf_spinlock_destroy(&soc->peer_ref_mutex);

	htt_soc_detach(soc->htt_handle);

	dp_reo_desc_freelist_destroy(soc);

	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);

	qdf_mem_free(soc);
}

/*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function configures the MAC rings for all attached pdevs.
 * On MCL, the host provides buffers in the Host2FW ring; the FW
 * refills (copies) buffers to the ring and updates ring_idx in
 * the register.
 *
 * Return: void
 */
#ifdef QCA_HOST2FW_RXBUF_RING
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id = 0;
			int j;
			bool dbs_enable = false;
			int max_mac_rings =
				wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);

			htt_srng_setup(soc->htt_handle, 0,
				pdev->rx_refill_buf_ring.hal_srng,
				RXDMA_BUF);

			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(soc->psoc);
			}

			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("DBS enabled max_mac_rings %d\n"),
					max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("DBS disabled, max_mac_rings %d\n"),
					max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				FL("pdev_id %d max_mac_rings %d\n"),
				pdev->pdev_id, max_mac_rings);

			for (j = 0; j < max_mac_rings; j++) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					QDF_TRACE_LEVEL_ERROR,
					FL("mac_id %d\n"), mac_id);
				htt_srng_setup(soc->htt_handle, mac_id,
					pdev->rx_mac_buf_ring[j]
					.hal_srng,
					RXDMA_BUF);
				mac_id++;
			}
		}
	}
}
#else
static void dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			htt_srng_setup(soc->htt_handle, i,
				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_buf_ring.hal_srng,
				RXDMA_MONITOR_BUF);
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_dst_ring.hal_srng,
				RXDMA_MONITOR_DST);
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_status_ring.hal_srng,
				RXDMA_MONITOR_STATUS);
			htt_srng_setup(soc->htt_handle, i,
				pdev->rxdma_mon_desc_ring.hal_srng,
				RXDMA_MONITOR_DESC);
		}
	}
}
#endif
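
/*
 * Illustrative note (inferred from the loops above, not upstream
 * documentation): in the Host2FW build the refill ring is always
 * programmed on mac_id 0 and each rx_mac_buf_ring[j] is handed to the FW
 * on an increasing mac_id, so a DBS-capable target ends up with one
 * refill ring plus max_mac_rings FW-owned MAC buffer rings per pdev.
 */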

/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Datapath SOC handle
 */
static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;

	htt_soc_attach_target(soc->htt_handle);

	dp_rxdma_ring_config(soc);

	DP_STATS_INIT(soc);
	return 0;
}
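
/*
 * Since dp_rxdma_ring_config() walks soc->pdev_list[0..MAX_PDEV_CNT-1],
 * this target-attach step is presumably meant to run only after every
 * pdev has been attached, so all refill/MAC rings get programmed into
 * the FW in one pass.
 */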

/*
 * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
 * @cdp_soc: Datapath SOC handle
 */
static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
}

/*
 * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
 * @cdp_soc: Datapath SOC handle
 * @config: nss config
 */
static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;

	wlan_cfg_set_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx, config);

	if (config) {
		/*
		 * disable dp interrupt if nss enabled
		 */
		wlan_cfg_set_num_contexts(dsoc->wlan_cfg_ctx, 0);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		FL("nss-wifi<0> nss config %d"), config);
}
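
/*
 * When NSS offload owns the Rx/Tx rings, setting the number of host
 * interrupt contexts to 0 (above) effectively turns off host-side DP
 * interrupt/poll processing; the offload engine is expected to service
 * the rings instead.
 */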

/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @txrx_pdev: Datapath PDEV handle
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->osdev = soc->osdev;

	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
#ifdef notyet
	vdev->filters_num = 0;
#endif

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	TAILQ_INIT(&vdev->peer_list);

	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	pdev->vdev_count++;

	dp_tx_vdev_attach(vdev);

#ifdef DP_INTR_POLL_BASED
	if (wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if (pdev->vdev_count == 1)
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
#endif

	dp_lro_hash_setup(soc);

	/* LRO */
	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
			wlan_op_mode_sta == vdev->opmode)
		vdev->lro_enable = true;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		"Created vdev %p (%pM)", vdev, vdev->mac_addr.raw);
	DP_STATS_INIT(vdev);

	return (struct cdp_vdev *)vdev;

fail0:
	return NULL;
}

/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @vdev_handle: Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: void
 */
static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
	void *osif_vdev,
	struct ol_txrx_ops *txrx_ops)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_rx_mon = txrx_ops->rx.mon;
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;

	/* TODO: Enable the following once Tx code is integrated */
	txrx_ops->tx.tx = dp_tx_send;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"DP Vdev Register success");
}
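
/*
 * Illustrative sketch (hypothetical OSIF caller, not part of this file):
 * the OSIF layer fills in its Rx hooks and receives the DP Tx entry point
 * back through the same ops structure, e.g.:
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = my_osif_rx_deliver;   // hypothetical OSIF Rx callback
 *	dp_vdev_register_wifi3(vdev_handle, my_osif_vdev, &ops);
 *	// after the call, ops.tx.tx points at dp_tx_send
 */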

/*
 * dp_vdev_detach_wifi3() - Detach txrx vdev
 * @vdev_handle: Datapath VDEV handle
 * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 */
static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	/* preconditions */
	qdf_assert(vdev);

	/* remove the vdev from its parent pdev's list */
	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);

	/*
	 * Use peer_ref_mutex while accessing peer_list, in case
	 * a peer is in the process of being removed from the list.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/* check that the vdev has no peers allocated */
	if (!TAILQ_EMPTY(&vdev->peer_list)) {
		/* debug print - will be removed later */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("not deleting vdev object %p (%pM)"
			" until deletion finishes for all its peers"),
			vdev, vdev->mac_addr.raw);
		/* indicate that the vdev needs to be deleted */
		vdev->delete.pending = 1;
		vdev->delete.callback = callback;
		vdev->delete.context = cb_context;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		return;
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	dp_tx_vdev_detach(vdev);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("deleting vdev object %p (%pM)"), vdev, vdev->mac_addr.raw);

	qdf_mem_free(vdev);

	if (callback)
		callback(cb_context);
}

/*
 * dp_peer_create_wifi3() - attach txrx peer
 * @vdev_handle: Datapath VDEV handle
 * @peer_mac_addr: Peer MAC address
 *
 * Return: DP peer handle on success, NULL on failure
 */
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
	uint8_t *peer_mac_addr)
{
	struct dp_peer *peer;
	int i;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer_mac_addr);

	pdev = vdev->pdev;
	soc = pdev->soc;
#ifdef notyet
	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
		soc->mempool_ol_ath_peer);
#else
	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
#endif

	if (!peer)
		return NULL; /* failure */

	qdf_mem_zero(peer, sizeof(struct dp_peer));

	TAILQ_INIT(&peer->ast_entry_list);
	qdf_mem_copy(&peer->self_ast_entry.mac_addr, peer_mac_addr,
		DP_MAC_ADDR_LEN);
	peer->self_ast_entry.peer = peer;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, &peer->self_ast_entry,
		ast_entry_elem);

	qdf_spinlock_create(&peer->peer_info_lock);

	/* store provided params */
	peer->vdev = vdev;

	qdf_mem_copy(
		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);

	/* TODO: See if rx_opt_proc is really required */
	peer->rx_opt_proc = soc->rx_opt_proc;

	/* initialize the peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
		peer->peer_ids[i] = HTT_INVALID_PEER;

	qdf_spin_lock_bh(&soc->peer_ref_mutex);

	qdf_atomic_init(&peer->ref_cnt);

	/* keep one reference for attach */
	qdf_atomic_inc(&peer->ref_cnt);

	/* add this peer into the vdev's list */
	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);

	/* TODO: See if hash based search is required */
	dp_peer_find_hash_add(soc, peer);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		"vdev %p created peer %p (%pM) ref_cnt: %d",
		vdev, peer, peer->mac_addr.raw,
		qdf_atomic_read(&peer->ref_cnt));

	/*
	 * For every peer MAP message, search and set if bss_peer
	 */
	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw,
			DP_MAC_ADDR_LEN) == 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"vdev bss_peer!!!!");
		peer->bss_peer = 1;
		vdev->vap_bss_peer = peer;
	}

#ifndef CONFIG_WIN
	dp_local_peer_id_alloc(pdev, peer);
#endif
	DP_STATS_INIT(peer);
	return (void *)peer;
}
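
/*
 * Reference-count contract (as implemented above and in
 * dp_peer_unref_delete()): peer create holds one reference for the attach
 * path, and the firmware PEER_MAP message is expected to add another; the
 * peer memory is only released once both have been dropped through
 * dp_peer_unref_delete().
 */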

/*
 * dp_peer_setup_wifi3() - initialize the peer
 * @vdev_hdl: virtual device object
 * @peer_hdl: Peer object
 *
 * Return: void
 */
static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
{
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;

	/* preconditions */
	qdf_assert(vdev);
	qdf_assert(peer);

	pdev = vdev->pdev;
	soc = pdev->soc;

	dp_peer_rx_init(pdev, peer);

	peer->last_assoc_rcvd = 0;
	peer->last_disassoc_rcvd = 0;
	peer->last_deauth_rcvd = 0;

	hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		FL("hash based steering %d\n"), hash_based);

	if (!hash_based)
		reo_dest = pdev->reo_dest;
	else
		reo_dest = 1;

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
			pdev->osif_pdev, peer->mac_addr.raw,
			peer->vdev->vdev_id, hash_based, reo_dest);
	}
}

/*
 * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
 * @vdev_handle: virtual device object
 * @val: packet type (see enum htt_cmn_pkt_type)
 *
 * Return: void
 */
static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
	enum htt_cmn_pkt_type val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->tx_encap_type = val;
}

/*
 * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
 * @vdev_handle: virtual device object
 * @val: packet type (see enum htt_cmn_pkt_type)
 *
 * Return: void
 */
static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
	enum htt_cmn_pkt_type val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	vdev->rx_decap_type = val;
}

/*
 * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
 * @pdev_handle: physical device object
 * @val: reo destination ring index (1 - 4)
 *
 * Return: void
 */
static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
	enum cdp_host_reo_dest_ring val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (pdev)
		pdev->reo_dest = val;
}

/*
 * dp_get_pdev_reo_dest() - get the reo destination for this pdev
 * @pdev_handle: physical device object
 *
 * Return: reo destination ring index
 */
static enum cdp_host_reo_dest_ring
dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	if (pdev)
		return pdev->reo_dest;
	else
		return cdp_host_reo_dest_ring_unknown;
}
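
/*
 * dp_son_peer_authorize() - SON-specific handling on peer authorize
 * @peer: Datapath peer handle
 *
 * Clears peer_bs_inact_flag and reloads peer_bs_inact from the soc-level
 * pdev_bs_inact_reload value; a no-op stub is used when QCA_SUPPORT_SON
 * is not defined.
 *
 * Return: void
 */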
#ifdef QCA_SUPPORT_SON
static void dp_son_peer_authorize(struct dp_peer *peer)
{
	struct dp_soc *soc;

	soc = peer->vdev->pdev->soc;
	peer->peer_bs_inact_flag = 0;
	peer->peer_bs_inact = soc->pdev_bs_inact_reload;
}
#else
static void dp_son_peer_authorize(struct dp_peer *peer)
{
}
#endif

/*
 * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
 * @pdev_handle: device object
 * @val: value to be set
 *
 * Return: 0 on success
 */
static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
	uint32_t val)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}

/*
 * dp_update_filter_neighbour_peers() - set neighbour peers (NAC clients)
 * address for smart mesh filtering
 * @pdev_handle: device object
 * @cmd: Add/Del command
 * @macaddr: NAC client MAC address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
	uint32_t cmd, uint8_t *macaddr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_neighbour_peer *peer = NULL;

	if (!macaddr)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
			sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, DP_MAC_ADDR_LEN);

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
			neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		return 1;

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, DP_MAC_ADDR_LEN)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		return 1;
	}

fail0:
	return 0;
}
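
/*
 * Illustrative usage (hypothetical control-path caller): a NAC client is
 * tracked while smart mesh filtering is on, then dropped again, e.g.:
 *
 *	dp_set_filter_neighbour_peers(pdev_handle, 1);
 *	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_ADD, mac);
 *	...
 *	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_DEL, mac);
 */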

/*
 * dp_peer_authorize() - authorize txrx peer
 * @peer_handle: Datapath peer handle
 * @authorize: authorize flag
 *
 */
static void dp_peer_authorize(void *peer_handle, uint32_t authorize)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_soc *soc;

	if (peer != NULL) {
		soc = peer->vdev->pdev->soc;

		qdf_spin_lock_bh(&soc->peer_ref_mutex);
		dp_son_peer_authorize(peer);
		peer->authorize = authorize ? 1 : 0;
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}

/*
 * dp_peer_unref_delete() - unref and delete peer
 * @peer_handle: Datapath peer handle
 *
 */
void dp_peer_unref_delete(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_peer *tmppeer;
	int found = 0;
	uint16_t peer_id;
	uint16_t hw_peer_id;
	uint8_t vdev_id;
	uint8_t peer_mac_addr[DP_MAC_ADDR_LEN];
	struct dp_ast_entry *ast_entry;
	struct dp_ast_entry *tmp_ast_entry;

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		"%s: peer %p ref_cnt(before decrement): %d\n", __func__,
		peer, qdf_atomic_read(&peer->ref_cnt));
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_ids[0];

		/*
		 * Cache the fields needed for the control-path callback
		 * below: both the peer and (if its deletion was pending)
		 * the vdev are freed before that callback runs.
		 */
		vdev_id = vdev->vdev_id;
		qdf_mem_copy(peer_mac_addr, peer->mac_addr.raw,
			DP_MAC_ADDR_LEN);

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		if (peer_id != HTT_INVALID_PEER)
			soc->peer_id_to_obj_map[peer_id] = NULL;

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			"Deleting peer %p (%pM)", peer, peer->mac_addr.raw);

		/* remove the reference to the peer from the hash table */
		dp_peer_find_hash_remove(soc, peer);

		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}
		if (found) {
			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
				peer_list_elem);
		} else {
			/* Ignoring the remove operation as peer not found */
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
				"peer %p not found in vdev (%p)->peer_list:%p",
				peer, vdev, &peer->vdev->peer_list);
		}

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		/* check whether the parent vdev has no peers left */
		if (TAILQ_EMPTY(&vdev->peer_list)) {
			/*
			 * Now that there are no references to the peer, we can
			 * release the peer reference lock.
			 */
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			/*
			 * Check if the parent vdev was waiting for its peers
			 * to be deleted, in order for it to be deleted too.
			 */
			if (vdev->delete.pending) {
				ol_txrx_vdev_delete_cb vdev_delete_cb =
					vdev->delete.callback;
				void *vdev_delete_context =
					vdev->delete.context;

				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_INFO_HIGH,
					FL("deleting vdev object %p (%pM)"
					" - its last peer is done"),
					vdev, vdev->mac_addr.raw);
				/* all peers are gone, go ahead and delete it */
				qdf_mem_free(vdev);
				if (vdev_delete_cb)
					vdev_delete_cb(vdev_delete_context);
			}
		} else {
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
		}
#ifdef notyet
		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
#else
		/* use the _SAFE variant since entries are freed mid-walk */
		TAILQ_FOREACH_SAFE(ast_entry, &peer->ast_entry_list,
				ast_entry_elem, tmp_ast_entry) {
			hw_peer_id = ast_entry->ast_idx;
			if (peer->self_ast_entry.ast_idx != hw_peer_id)
				qdf_mem_free(ast_entry);
			else
				peer->self_ast_entry.ast_idx =
					HTT_INVALID_PEER;
			soc->ast_table[hw_peer_id] = NULL;
		}
		qdf_mem_free(peer);
#endif
		/* notify the control path with the cached fields, since
		 * the peer (and possibly the vdev) are gone by now
		 */
		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
				vdev_id, peer_mac_addr);
		}
	} else {
		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	}
}
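
/*
 * Locking note: dp_peer_unref_delete() takes peer_ref_mutex itself, so it
 * must be called without that lock held; callers such as
 * dp_peer_delete_wifi3() below rely on this when dropping the attach-time
 * reference.
 */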

/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @peer_handle: Datapath peer handle
 *
 */
static void dp_peer_delete_wifi3(void *peer_handle)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	/* redirect the peer's rx delivery function to point to a
	 * discard func
	 */
	peer->rx_opt_proc = dp_rx_discard;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		FL("peer %p (%pM)"), peer, peer->mac_addr.raw);

#ifndef CONFIG_WIN
	dp_local_peer_id_free(peer->vdev->pdev, peer);
#endif
	qdf_spinlock_destroy(&peer->peer_info_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer_handle);
}

/*
 * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
 * @pvdev: Datapath VDEV handle
 *
 * Return: pointer to the vdev's raw MAC address
 */
static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;

	return vdev->mac_addr.raw;
}

/*
 * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
 * @vdev_handle: DP VDEV handle
 * @val: value
 *
 * Return: 0 on success
 */
static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->wds_enabled = val;
	return 0;
}

/*
 * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev_id
 * @dev: Datapath PDEV handle
 * @vdev_id: VDEV Id to look up
 *
 * Return: DP VDEV handle on success, NULL on failure
 */
static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
	uint8_t vdev_id)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_vdev *vdev = NULL;

	if (qdf_unlikely(!pdev))
		return NULL;

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (vdev->vdev_id == vdev_id)
			break;
	}

	return (struct cdp_vdev *)vdev;
}
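
/*
 * dp_get_opmode() - Get the operating mode of the vdev
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: vdev operating mode (enum wlan_op_mode)
 */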
static int dp_get_opmode(struct cdp_vdev *vdev_handle)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	return vdev->opmode;
}
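
/*
 * dp_get_ctrl_pdev_from_vdev_wifi3() - Get the wlan_cfg (ctrl pdev) context
 * of the parent pdev for this vdev
 * @pvdev: Datapath VDEV handle
 *
 * Return: ctrl pdev handle
 */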
static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
{
	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
	struct dp_pdev *pdev = vdev->pdev;

	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
}

/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @vdev_handle: Datapath VDEV handle
 * @smart_monitor: Flag to denote if this is smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
	uint8_t smart_monitor)
{
	/* Many monitor VAPs can exist in a system but only one can be up at
	 * any time
	 */
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
	struct dp_soc *soc;
	uint8_t pdev_id;

	qdf_assert(vdev);

	pdev = vdev->pdev;
	pdev_id = pdev->pdev_id;
	soc = pdev->soc;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"pdev=%p, pdev_id=%d, soc=%p vdev=%p\n",
		pdev, pdev_id, soc, vdev);

	/* Check if current pdev's monitor_vdev exists */
	if (pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"monitor vdev already exists, vdev=%p\n", vdev);
		qdf_assert(vdev);
	}

	pdev->monitor_vdev = vdev;

	/* If smart monitor mode, do not configure monitor ring */
	if (smart_monitor)
		return QDF_STATUS_SUCCESS;
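
	/*
	 * Monitor buffer ring: request full per-packet TLVs (MPDU/MSDU
	 * start and end, packet header and payload, attention) with
	 * filter-pass (FP) and monitor-other (MO) modes enabled.
	 */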
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 1;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 0;
	htt_tlv_filter.ppdu_end = 0;
	htt_tlv_filter.ppdu_end_user_stats = 0;
	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
	htt_tlv_filter.ppdu_end_status_done = 0;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 0;
	htt_tlv_filter.enable_mo = 1;

	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
		pdev->rxdma_mon_buf_ring.hal_srng,
		RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
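
	/*
	 * Monitor status ring: PPDU-level TLVs (ppdu start/end, user stats,
	 * status done) on top of the per-MPDU/MSDU ones; the actual ring
	 * configuration call is still commented out below.
	 */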
	htt_tlv_filter.mpdu_start = 1;
	htt_tlv_filter.msdu_start = 1;
	htt_tlv_filter.packet = 0;
	htt_tlv_filter.msdu_end = 1;
	htt_tlv_filter.mpdu_end = 1;
	htt_tlv_filter.packet_header = 1;
	htt_tlv_filter.attention = 1;
	htt_tlv_filter.ppdu_start = 1;
	htt_tlv_filter.ppdu_end = 1;
	htt_tlv_filter.ppdu_end_user_stats = 1;
	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
	htt_tlv_filter.ppdu_end_status_done = 1;
	htt_tlv_filter.enable_fp = 1;
	htt_tlv_filter.enable_md = 1;
	htt_tlv_filter.enable_mo = 1;
	/*
	 * htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
	 *	pdev->rxdma_mon_status_ring.hal_srng,
	 *	RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
	 */

	return QDF_STATUS_SUCCESS;
}

#ifdef MESH_MODE_SUPPORT
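/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */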
void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_vdev = val;
}

/*
 * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_rx_filter = val;
}
#endif

/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 *
 * return: void
 */
void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;
	int i;

	qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
	qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer)
			return;

		for (i = 0; i <= MAX_MCS; i++) {
			DP_STATS_AGGR(vdev, peer, tx.pkt_type[0].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, tx.pkt_type[1].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, tx.pkt_type[2].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, tx.pkt_type[3].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, tx.pkt_type[4].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, rx.pkt_type[0].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, rx.pkt_type[1].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, rx.pkt_type[2].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, rx.pkt_type[3].mcs_count[i]);
			DP_STATS_AGGR(vdev, peer, rx.pkt_type[4].mcs_count[i]);
		}

		for (i = 0; i < SUPPORTED_BW; i++) {
			DP_STATS_AGGR(vdev, peer, tx.bw[i]);
			DP_STATS_AGGR(vdev, peer, rx.bw[i]);
		}

		for (i = 0; i < SS_COUNT; i++)
			DP_STATS_AGGR(vdev, peer, rx.nss[i]);

		for (i = 0; i < WME_AC_MAX; i++) {
			DP_STATS_AGGR(vdev, peer, tx.wme_ac_type[i]);
			DP_STATS_AGGR(vdev, peer, rx.wme_ac_type[i]);
			DP_STATS_AGGR(vdev, peer, tx.excess_retries_ac[i]);
		}

		for (i = 0; i < MAX_MCS + 1; i++) {
			DP_STATS_AGGR(vdev, peer, tx.sgi_count[i]);
			DP_STATS_AGGR(vdev, peer, rx.sgi_count[i]);
		}

		DP_STATS_AGGR_PKT(vdev, peer, tx.comp_pkt);
		DP_STATS_AGGR_PKT(vdev, peer, tx.ucast);
		DP_STATS_AGGR_PKT(vdev, peer, tx.mcast);
		DP_STATS_AGGR_PKT(vdev, peer, tx.tx_success);
		DP_STATS_AGGR(vdev, peer, tx.tx_failed);
		DP_STATS_AGGR(vdev, peer, tx.ofdma);
		DP_STATS_AGGR(vdev, peer, tx.stbc);
		DP_STATS_AGGR(vdev, peer, tx.ldpc);
		DP_STATS_AGGR(vdev, peer, tx.retries);
		DP_STATS_AGGR(vdev, peer, tx.non_amsdu_cnt);
		DP_STATS_AGGR(vdev, peer, tx.amsdu_cnt);
		DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard);
		DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_retired);
		DP_STATS_AGGR(vdev, peer, tx.dropped.mpdu_age_out);
		DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason1);
		DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason2);
		DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason3);
		DP_STATS_AGGR(vdev, peer, rx.err.mic_err);
		DP_STATS_AGGR(vdev, peer, rx.err.decrypt_err);
		DP_STATS_AGGR(vdev, peer, rx.non_ampdu_cnt);
		DP_STATS_AGGR(vdev, peer, rx.ampdu_cnt);
		DP_STATS_AGGR(vdev, peer, rx.non_amsdu_cnt);
		DP_STATS_AGGR(vdev, peer, rx.amsdu_cnt);
		DP_STATS_AGGR_PKT(vdev, peer, rx.to_stack);

		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_STATS_AGGR_PKT(vdev, peer, rx.rcvd_reo[i]);

		peer->stats.rx.unicast.num = peer->stats.rx.to_stack.num -
			peer->stats.rx.multicast.num;
		peer->stats.rx.unicast.bytes = peer->stats.rx.to_stack.bytes -
			peer->stats.rx.multicast.bytes;
		DP_STATS_AGGR_PKT(vdev, peer, rx.unicast);
		DP_STATS_AGGR_PKT(vdev, peer, rx.multicast);
		DP_STATS_AGGR_PKT(vdev, peer, rx.wds);
		DP_STATS_AGGR_PKT(vdev, peer, rx.raw);
		DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.pkts);
		DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.fail);

		vdev->stats.tx.last_ack_rssi =
			peer->stats.tx.last_ack_rssi;
	}
}

/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * return: void
 */
static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	uint8_t i;

	qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
	qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
	qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);

	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		if (!vdev)
			return;

		dp_aggregate_vdev_stats(vdev);

		for (i = 0; i <= MAX_MCS; i++) {
			DP_STATS_AGGR(pdev, vdev, tx.pkt_type[0].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, tx.pkt_type[1].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, tx.pkt_type[2].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, tx.pkt_type[3].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, tx.pkt_type[4].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, rx.pkt_type[0].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, rx.pkt_type[1].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, rx.pkt_type[2].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, rx.pkt_type[3].mcs_count[i]);
			DP_STATS_AGGR(pdev, vdev, rx.pkt_type[4].mcs_count[i]);
		}

		for (i = 0; i < SUPPORTED_BW; i++) {
			DP_STATS_AGGR(pdev, vdev, tx.bw[i]);
			DP_STATS_AGGR(pdev, vdev, rx.bw[i]);
		}

		for (i = 0; i < SS_COUNT; i++)
			DP_STATS_AGGR(pdev, vdev, rx.nss[i]);

		for (i = 0; i < WME_AC_MAX; i++) {
			DP_STATS_AGGR(pdev, vdev, tx.wme_ac_type[i]);
			DP_STATS_AGGR(pdev, vdev, rx.wme_ac_type[i]);
			DP_STATS_AGGR(pdev, vdev,
				tx.excess_retries_ac[i]);
		}

		for (i = 0; i < MAX_MCS + 1; i++) {
			DP_STATS_AGGR(pdev, vdev, tx.sgi_count[i]);
			DP_STATS_AGGR(pdev, vdev, rx.sgi_count[i]);
		}

		DP_STATS_AGGR_PKT(pdev, vdev, tx.comp_pkt);
		DP_STATS_AGGR_PKT(pdev, vdev, tx.ucast);
		DP_STATS_AGGR_PKT(pdev, vdev, tx.mcast);
		DP_STATS_AGGR_PKT(pdev, vdev, tx.tx_success);
		DP_STATS_AGGR(pdev, vdev, tx.tx_failed);
		DP_STATS_AGGR(pdev, vdev, tx.ofdma);
		DP_STATS_AGGR(pdev, vdev, tx.stbc);
		DP_STATS_AGGR(pdev, vdev, tx.ldpc);
		DP_STATS_AGGR(pdev, vdev, tx.retries);
		DP_STATS_AGGR(pdev, vdev, tx.non_amsdu_cnt);
		DP_STATS_AGGR(pdev, vdev, tx.amsdu_cnt);
		DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_discard);
		DP_STATS_AGGR(pdev, vdev,
			tx.dropped.fw_discard_retired);
		DP_STATS_AGGR(pdev, vdev, tx.dropped.mpdu_age_out);
		DP_STATS_AGGR(pdev, vdev,
			tx.dropped.fw_discard_reason1);
		DP_STATS_AGGR(pdev, vdev,
			tx.dropped.fw_discard_reason2);
		DP_STATS_AGGR(pdev, vdev,
			tx.dropped.fw_discard_reason3);
		DP_STATS_AGGR(pdev, vdev, rx.err.mic_err);
		DP_STATS_AGGR(pdev, vdev, rx.err.decrypt_err);
		DP_STATS_AGGR(pdev, vdev, rx.non_ampdu_cnt);
		DP_STATS_AGGR(pdev, vdev, rx.ampdu_cnt);
		DP_STATS_AGGR(pdev, vdev, rx.non_amsdu_cnt);
		DP_STATS_AGGR(pdev, vdev, rx.amsdu_cnt);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.to_stack);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[0]);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[1]);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[2]);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[3]);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.unicast);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.multicast);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.wds);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.fail);
		DP_STATS_AGGR_PKT(pdev, vdev, rx.raw);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
		DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
		DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
		DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
		DP_STATS_AGGR(pdev, vdev,
			tx_i.mcast_en.dropped_map_error);
		DP_STATS_AGGR(pdev, vdev,
			tx_i.mcast_en.dropped_self_mac);
		DP_STATS_AGGR(pdev, vdev,
			tx_i.mcast_en.dropped_send_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
		DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);

		pdev->stats.tx_i.dropped.dropped_pkt.num =
			pdev->stats.tx_i.dropped.dma_error +
			pdev->stats.tx_i.dropped.ring_full +
			pdev->stats.tx_i.dropped.enqueue_fail +
			pdev->stats.tx_i.dropped.desc_na +
			pdev->stats.tx_i.dropped.res_full;

		pdev->stats.tx.last_ack_rssi =
			vdev->stats.tx.last_ack_rssi;
		pdev->stats.tx_i.tso.num_seg =
			vdev->stats.tx_i.tso.num_seg;
	}
}

/**
 * dp_print_pdev_tx_stats(): Print Pdev level TX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
static inline void
dp_print_pdev_tx_stats(struct dp_pdev *pdev)
{
	DP_TRACE_STATS(FATAL, "WLAN Tx Stats:\n");
	DP_TRACE_STATS(FATAL, "Received From Stack:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.rcvd.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.tx_i.rcvd.bytes);
	DP_TRACE_STATS(FATAL, "Processed:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.processed.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.tx_i.processed.bytes);
	DP_TRACE_STATS(FATAL, "Completions:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx.comp_pkt.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.tx.comp_pkt.bytes);
	DP_TRACE_STATS(FATAL, "Dropped:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.dropped.dropped_pkt.num);
	DP_TRACE_STATS(FATAL, "DMA map error = %d",
		pdev->stats.tx_i.dropped.dma_error);
	DP_TRACE_STATS(FATAL, "Ring Full = %d",
		pdev->stats.tx_i.dropped.ring_full);
	DP_TRACE_STATS(FATAL, "Descriptor Not available = %d",
		pdev->stats.tx_i.dropped.desc_na);
	DP_TRACE_STATS(FATAL, "HW enqueue failed = %d",
		pdev->stats.tx_i.dropped.enqueue_fail);
	DP_TRACE_STATS(FATAL, "Resources Full = %d",
		pdev->stats.tx_i.dropped.res_full);
	DP_TRACE_STATS(FATAL, "Fw Discard = %d",
		pdev->stats.tx.dropped.fw_discard);
	DP_TRACE_STATS(FATAL, "Fw Discard Retired = %d",
		pdev->stats.tx.dropped.fw_discard_retired);
	DP_TRACE_STATS(FATAL, "Firmware Discard Untransmitted = %d",
		pdev->stats.tx.dropped.fw_discard_untransmitted);
	DP_TRACE_STATS(FATAL, "Mpdu Age Out = %d",
		pdev->stats.tx.dropped.mpdu_age_out);
	DP_TRACE_STATS(FATAL, "Firmware Discard Reason1 = %d",
		pdev->stats.tx.dropped.fw_discard_reason1);
	DP_TRACE_STATS(FATAL, "Firmware Discard Reason2 = %d",
		pdev->stats.tx.dropped.fw_discard_reason2);
	DP_TRACE_STATS(FATAL, "Firmware Discard Reason3 = %d\n",
		pdev->stats.tx.dropped.fw_discard_reason3);
	DP_TRACE_STATS(FATAL, "Scatter Gather:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.sg.sg_pkt.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d",
		pdev->stats.tx_i.sg.sg_pkt.bytes);
	DP_TRACE_STATS(FATAL, "Dropped By Host = %d",
		pdev->stats.tx_i.sg.dropped_host);
	DP_TRACE_STATS(FATAL, "Dropped By Target = %d\n",
		pdev->stats.tx_i.sg.dropped_target);
	DP_TRACE_STATS(FATAL, "TSO:\n");
	DP_TRACE_STATS(FATAL, "Number of Segments = %d",
		pdev->stats.tx_i.tso.num_seg);
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.tso.tso_pkt.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d",
		pdev->stats.tx_i.tso.tso_pkt.bytes);
	DP_TRACE_STATS(FATAL, "Dropped By Host = %d\n",
		pdev->stats.tx_i.tso.dropped_host);
	DP_TRACE_STATS(FATAL, "Mcast Enhancement:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.mcast_en.mcast_pkt.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d",
		pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
	DP_TRACE_STATS(FATAL, "Dropped: Map Errors = %d",
		pdev->stats.tx_i.mcast_en.dropped_map_error);
	DP_TRACE_STATS(FATAL, "Dropped: Self Mac = %d",
		pdev->stats.tx_i.mcast_en.dropped_self_mac);
	DP_TRACE_STATS(FATAL, "Dropped: Send Fail = %d",
		pdev->stats.tx_i.mcast_en.dropped_send_fail);
	DP_TRACE_STATS(FATAL, "Unicast sent = %d\n",
		pdev->stats.tx_i.mcast_en.ucast);
	DP_TRACE_STATS(FATAL, "Raw:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.raw.raw_pkt.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d",
		pdev->stats.tx_i.raw.raw_pkt.bytes);
	DP_TRACE_STATS(FATAL, "DMA map error = %d\n",
		pdev->stats.tx_i.raw.dma_map_error);
	DP_TRACE_STATS(FATAL, "Reinjected:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.reinject_pkts.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.tx_i.reinject_pkts.bytes);
	DP_TRACE_STATS(FATAL, "Inspected:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.tx_i.inspect_pkts.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.tx_i.inspect_pkts.bytes);
}

/**
 * dp_print_pdev_rx_stats(): Print Pdev level RX stats
 * @pdev: DP_PDEV Handle
 *
 * Return: void
 */
static inline void
dp_print_pdev_rx_stats(struct dp_pdev *pdev)
{
	DP_TRACE_STATS(FATAL, "WLAN Rx Stats:\n");
	DP_TRACE_STATS(FATAL, "Received From HW (Per Rx Ring):\n");
	DP_TRACE_STATS(FATAL, "Packets = %d %d %d %d",
		pdev->stats.rx.rcvd_reo[0].num,
		pdev->stats.rx.rcvd_reo[1].num,
		pdev->stats.rx.rcvd_reo[2].num,
		pdev->stats.rx.rcvd_reo[3].num);
	DP_TRACE_STATS(FATAL, "Bytes = %d %d %d %d\n",
		pdev->stats.rx.rcvd_reo[0].bytes,
		pdev->stats.rx.rcvd_reo[1].bytes,
		pdev->stats.rx.rcvd_reo[2].bytes,
		pdev->stats.rx.rcvd_reo[3].bytes);
	DP_TRACE_STATS(FATAL, "Replenished:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.replenish.pkts.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d",
		pdev->stats.replenish.pkts.bytes);
	DP_TRACE_STATS(FATAL, "Buffers Added To Freelist = %d\n",
		pdev->stats.buf_freelist);
	DP_TRACE_STATS(FATAL, "Dropped:\n");
	DP_TRACE_STATS(FATAL, "Total Packets With Msdu Not Done = %d\n",
		pdev->stats.dropped.msdu_not_done);
	DP_TRACE_STATS(FATAL, "Sent To Stack:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.rx.to_stack.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.rx.to_stack.bytes);
	DP_TRACE_STATS(FATAL, "Multicast/Broadcast:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		pdev->stats.rx.multicast.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d\n",
		pdev->stats.rx.multicast.bytes);
	DP_TRACE_STATS(FATAL, "Errors:\n");
	DP_TRACE_STATS(FATAL, "Rxdma Ring Un-initialized = %d",
		pdev->stats.replenish.rxdma_err);
	DP_TRACE_STATS(FATAL, "Desc Alloc Failed = %d",
		pdev->stats.err.desc_alloc_fail);
}

/**
 * dp_print_soc_tx_stats(): Print SOC level stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
static inline void
dp_print_soc_tx_stats(struct dp_soc *soc)
{
	DP_TRACE_STATS(FATAL, "SOC Tx Stats:\n");
	DP_TRACE_STATS(FATAL, "Tx Descriptors In Use = %d",
		soc->stats.tx.desc_in_use);
	DP_TRACE_STATS(FATAL, "Invalid peer:\n");
	DP_TRACE_STATS(FATAL, "Packets = %d",
		soc->stats.tx.tx_invalid_peer.num);
	DP_TRACE_STATS(FATAL, "Bytes = %d",
		soc->stats.tx.tx_invalid_peer.bytes);
	DP_TRACE_STATS(FATAL, "Packets dropped due to TCL ring full = %d %d %d",
		soc->stats.tx.tcl_ring_full[0],
		soc->stats.tx.tcl_ring_full[1],
		soc->stats.tx.tcl_ring_full[2]);
}

/**
 * dp_print_soc_rx_stats(): Print SOC level Rx stats
 * @soc: DP_SOC Handle
 *
 * Return: void
 */
static inline void
dp_print_soc_rx_stats(struct dp_soc *soc)
{
	uint32_t i;
	char reo_error[DP_REO_ERR_LENGTH];
	char rxdma_error[DP_RXDMA_ERR_LENGTH];
	uint8_t index = 0;

	DP_TRACE_STATS(FATAL, "SOC Rx Stats:\n");
	DP_TRACE_STATS(FATAL, "Errors:\n");
	DP_TRACE_STATS(FATAL, "Invalid RBM = %d",
		soc->stats.rx.err.invalid_rbm);
	DP_TRACE_STATS(FATAL, "Invalid Vdev = %d",
		soc->stats.rx.err.invalid_vdev);
	DP_TRACE_STATS(FATAL, "Invalid Pdev = %d",
		soc->stats.rx.err.invalid_pdev);
	DP_TRACE_STATS(FATAL, "Invalid Peer = %d",
		soc->stats.rx.err.rx_invalid_peer.num);
	DP_TRACE_STATS(FATAL, "HAL Ring Access Fail = %d",
		soc->stats.rx.err.hal_ring_access_fail);

	for (i = 0; i < MAX_RXDMA_ERRORS; i++) {
		index += qdf_snprint(&rxdma_error[index],
			DP_RXDMA_ERR_LENGTH - index,
			" %d", soc->stats.rx.err.rxdma_error[i]);
	}
	DP_TRACE_STATS(FATAL, "RXDMA Error (0-31):%s",
		rxdma_error);

	index = 0;
	for (i = 0; i < REO_ERROR_TYPE_MAX; i++) {
		index += qdf_snprint(&reo_error[index],
			DP_REO_ERR_LENGTH - index,
			" %d", soc->stats.rx.err.reo_error[i]);
	}
	DP_TRACE_STATS(FATAL, "REO Error(0-14):%s",
		reo_error);
}

/**
 * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_txrx_host_stats_clr(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	DP_STATS_CLR(vdev->pdev);
	DP_STATS_CLR(vdev->pdev->soc);
	DP_STATS_CLR(vdev);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer)
			return;
		DP_STATS_CLR(peer);
	}
}

/**
 * dp_print_rx_rates(): Print Rx rate stats
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_print_rx_rates(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	uint8_t i, pkt_type;
	uint8_t index = 0;
	char rx_mcs[DOT11_MAX][DP_MCS_LENGTH];
	char nss[DP_NSS_LENGTH];

	DP_TRACE_STATS(FATAL, "Rx Rate Info:\n");

	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&rx_mcs[pkt_type][index],
				DP_MCS_LENGTH - index,
				" %d ",
				pdev->stats.rx.pkt_type[pkt_type].
				mcs_count[i]);
		}
	}

	DP_TRACE_STATS(FATAL, "11A MCS(0-7) = %s",
		rx_mcs[0]);
	DP_TRACE_STATS(FATAL, "11A MCS Invalid = %d",
		pdev->stats.rx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11B MCS(0-6) = %s",
		rx_mcs[1]);
	DP_TRACE_STATS(FATAL, "11B MCS Invalid = %d",
		pdev->stats.rx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11N MCS(0-7) = %s",
		rx_mcs[2]);
	DP_TRACE_STATS(FATAL, "11N MCS Invalid = %d",
		pdev->stats.rx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AC MCS(0-9) = %s",
		rx_mcs[3]);
	DP_TRACE_STATS(FATAL, "11AC MCS Invalid = %d",
		pdev->stats.rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AX MCS(0-11) = %s",
		rx_mcs[4]);
	DP_TRACE_STATS(FATAL, "11AX MCS Invalid = %d",
		pdev->stats.rx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	index = 0;
	for (i = 0; i < SS_COUNT; i++) {
		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
			" %d", pdev->stats.rx.nss[i]);
	}
	DP_TRACE_STATS(FATAL, "NSS(0-7) = %s",
		nss);

	DP_TRACE_STATS(FATAL, "SGI ="
		" 0.8us %d,"
		" 0.4us %d,"
		" 1.6us %d,"
		" 3.2us %d,",
		pdev->stats.rx.sgi_count[0],
		pdev->stats.rx.sgi_count[1],
		pdev->stats.rx.sgi_count[2],
		pdev->stats.rx.sgi_count[3]);
	DP_TRACE_STATS(FATAL, "BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
		pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
		pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
	DP_TRACE_STATS(FATAL, "Reception Type ="
		" SU: %d,"
		" MU_MIMO: %d,"
		" MU_OFDMA: %d,"
		" MU_OFDMA_MIMO: %d\n",
		pdev->stats.rx.reception_type[0],
		pdev->stats.rx.reception_type[1],
		pdev->stats.rx.reception_type[2],
		pdev->stats.rx.reception_type[3]);
	DP_TRACE_STATS(FATAL, "Aggregation:\n");
	DP_TRACE_STATS(FATAL, "Number of Msdus Part of Ampdus = %d",
		pdev->stats.rx.ampdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdus With No Mpdu Level Aggregation = %d",
		pdev->stats.rx.non_ampdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdus Part of Amsdu = %d",
		pdev->stats.rx.amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdus With No Msdu Level Aggregation = %d",
		pdev->stats.rx.non_amsdu_cnt);
}
/**
 * dp_print_tx_rates(): Print tx rates
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_print_tx_rates(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	uint8_t i, pkt_type;
	char mcs[DOT11_MAX][DP_MCS_LENGTH];
	uint32_t index;

	DP_TRACE_STATS(FATAL, "Tx Rate Info:\n");
	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&mcs[pkt_type][index],
					DP_MCS_LENGTH - index,
					" %d ",
					pdev->stats.tx.pkt_type[pkt_type].
					mcs_count[i]);
		}
	}
	DP_TRACE_STATS(FATAL, "11A MCS(0-7) = %s", mcs[0]);
	DP_TRACE_STATS(FATAL, "11A MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11B MCS(0-6) = %s", mcs[1]);
	DP_TRACE_STATS(FATAL, "11B MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11N MCS(0-7) = %s", mcs[2]);
	DP_TRACE_STATS(FATAL, "11N MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AC MCS(0-9) = %s", mcs[3]);
	DP_TRACE_STATS(FATAL, "11AC MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AX MCS(0-11) = %s", mcs[4]);
	DP_TRACE_STATS(FATAL, "11AX MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	DP_TRACE_STATS(FATAL, "SGI ="
			" 0.8us %d"
			" 0.4us %d"
			" 1.6us %d"
			" 3.2us %d",
			pdev->stats.tx.sgi_count[0],
			pdev->stats.tx.sgi_count[1],
			pdev->stats.tx.sgi_count[2],
			pdev->stats.tx.sgi_count[3]);
	DP_TRACE_STATS(FATAL, "BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
	DP_TRACE_STATS(FATAL, "OFDMA = %d", pdev->stats.tx.ofdma);
	DP_TRACE_STATS(FATAL, "STBC = %d", pdev->stats.tx.stbc);
	DP_TRACE_STATS(FATAL, "LDPC = %d", pdev->stats.tx.ldpc);
	DP_TRACE_STATS(FATAL, "Retries = %d", pdev->stats.tx.retries);
	DP_TRACE_STATS(FATAL, "Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);
	DP_TRACE_STATS(FATAL, "Aggregation:\n");
	DP_TRACE_STATS(FATAL, "Number of Msdu's Part of Amsdu = %d",
			pdev->stats.tx.amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdu's With No Msdu Level Aggregation = %d",
			pdev->stats.tx.non_amsdu_cnt);
}
/**
 * dp_print_peer_stats(): Print peer stats
 * @peer: DP_PEER handle
 *
 * Return: void
 */
static inline void dp_print_peer_stats(struct dp_peer *peer)
{
	uint8_t i, pkt_type;
	char tx_mcs[DOT11_MAX][DP_MCS_LENGTH];
	char rx_mcs[DOT11_MAX][DP_MCS_LENGTH];
	uint32_t index;
	char nss[DP_NSS_LENGTH];

	DP_TRACE_STATS(FATAL, "Node Tx Stats:\n");
	DP_TRACE_STATS(FATAL, "Total Packet Completions = %d",
			peer->stats.tx.comp_pkt.num);
	DP_TRACE_STATS(FATAL, "Total Bytes Completions = %d",
			peer->stats.tx.comp_pkt.bytes);
	DP_TRACE_STATS(FATAL, "Success Packets = %d",
			peer->stats.tx.tx_success.num);
	DP_TRACE_STATS(FATAL, "Success Bytes = %d",
			peer->stats.tx.tx_success.bytes);
	DP_TRACE_STATS(FATAL, "Packets Failed = %d",
			peer->stats.tx.tx_failed);
	DP_TRACE_STATS(FATAL, "Packets In OFDMA = %d",
			peer->stats.tx.ofdma);
	DP_TRACE_STATS(FATAL, "Packets In STBC = %d",
			peer->stats.tx.stbc);
	DP_TRACE_STATS(FATAL, "Packets In LDPC = %d",
			peer->stats.tx.ldpc);
	DP_TRACE_STATS(FATAL, "Packet Retries = %d",
			peer->stats.tx.retries);
	DP_TRACE_STATS(FATAL, "Msdu's Not Part of Amsdu = %d",
			peer->stats.tx.non_amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Msdu's Part of Amsdu = %d",
			peer->stats.tx.amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Last Packet RSSI = %d",
			peer->stats.tx.last_ack_rssi);
	DP_TRACE_STATS(FATAL, "Dropped At FW: FW Discard = %d",
			peer->stats.tx.dropped.fw_discard);
	DP_TRACE_STATS(FATAL, "Dropped At FW: FW Discard Retired = %d",
			peer->stats.tx.dropped.fw_discard_retired);
	DP_TRACE_STATS(FATAL, "Dropped At FW: FW Discard Untransmitted = %d",
			peer->stats.tx.dropped.fw_discard_untransmitted);
	DP_TRACE_STATS(FATAL, "Dropped: Mpdu Age Out = %d",
			peer->stats.tx.dropped.mpdu_age_out);
	DP_TRACE_STATS(FATAL, "Dropped: FW Discard Reason1 = %d",
			peer->stats.tx.dropped.fw_discard_reason1);
	DP_TRACE_STATS(FATAL, "Dropped: FW Discard Reason2 = %d",
			peer->stats.tx.dropped.fw_discard_reason2);
	DP_TRACE_STATS(FATAL, "Dropped: FW Discard Reason3 = %d",
			peer->stats.tx.dropped.fw_discard_reason3);
	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&tx_mcs[pkt_type][index],
					DP_MCS_LENGTH - index,
					" %d ",
					peer->stats.tx.pkt_type[pkt_type].
					mcs_count[i]);
		}
	}
	DP_TRACE_STATS(FATAL, "11A MCS(0-7) = %s", tx_mcs[0]);
	DP_TRACE_STATS(FATAL, "11A MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11B MCS(0-6) = %s", tx_mcs[1]);
	DP_TRACE_STATS(FATAL, "11B MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11N MCS(0-7) = %s", tx_mcs[2]);
	DP_TRACE_STATS(FATAL, "11N MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AC MCS(0-9) = %s", tx_mcs[3]);
	DP_TRACE_STATS(FATAL, "11AC MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AX MCS(0-11) = %s", tx_mcs[4]);
	DP_TRACE_STATS(FATAL, "11AX MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	DP_TRACE_STATS(FATAL, "SGI ="
			" 0.8us %d"
			" 0.4us %d"
			" 1.6us %d"
			" 3.2us %d",
			peer->stats.tx.sgi_count[0],
			peer->stats.tx.sgi_count[1],
			peer->stats.tx.sgi_count[2],
			peer->stats.tx.sgi_count[3]);
	DP_TRACE_STATS(FATAL, "BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
	DP_TRACE_STATS(FATAL, "Aggregation:\n");
	DP_TRACE_STATS(FATAL, "Number of Msdu's Part of Amsdu = %d",
			peer->stats.tx.amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdu's With No Msdu Level Aggregation = %d\n",
			peer->stats.tx.non_amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Node Rx Stats:\n");
	DP_TRACE_STATS(FATAL, "Packets Sent To Stack = %d",
			peer->stats.rx.to_stack.num);
	DP_TRACE_STATS(FATAL, "Bytes Sent To Stack = %d",
			peer->stats.rx.to_stack.bytes);
	for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
		DP_TRACE_STATS(FATAL, "Packets Received on reo[%d] = %d",
				i, peer->stats.rx.rcvd_reo[i].num);
		DP_TRACE_STATS(FATAL, "Bytes Received on reo[%d] = %d",
				i, peer->stats.rx.rcvd_reo[i].bytes);
	}
	DP_TRACE_STATS(FATAL, "Multicast Packets Received = %d",
			peer->stats.rx.multicast.num);
	DP_TRACE_STATS(FATAL, "Multicast Bytes Received = %d",
			peer->stats.rx.multicast.bytes);
	DP_TRACE_STATS(FATAL, "WDS Packets Received = %d",
			peer->stats.rx.wds.num);
	DP_TRACE_STATS(FATAL, "WDS Bytes Received = %d",
			peer->stats.rx.wds.bytes);
	DP_TRACE_STATS(FATAL, "Intra BSS Packets Received = %d",
			peer->stats.rx.intra_bss.pkts.num);
	DP_TRACE_STATS(FATAL, "Intra BSS Bytes Received = %d",
			peer->stats.rx.intra_bss.pkts.bytes);
	DP_TRACE_STATS(FATAL, "Raw Packets Received = %d",
			peer->stats.rx.raw.num);
	DP_TRACE_STATS(FATAL, "Raw Bytes Received = %d",
			peer->stats.rx.raw.bytes);
	DP_TRACE_STATS(FATAL, "Errors: MIC Errors = %d",
			peer->stats.rx.err.mic_err);
	DP_TRACE_STATS(FATAL, "Errors: Decryption Errors = %d",
			peer->stats.rx.err.decrypt_err);
	DP_TRACE_STATS(FATAL, "Msdu's Received Not Part of Ampdu = %d",
			peer->stats.rx.non_ampdu_cnt);
	DP_TRACE_STATS(FATAL, "Msdu's Received As Part of Ampdu = %d",
			peer->stats.rx.ampdu_cnt);
	DP_TRACE_STATS(FATAL, "Msdu's Received Not Part of Amsdu = %d",
			peer->stats.rx.non_amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Msdu's Received As Part of Amsdu = %d",
			peer->stats.rx.amsdu_cnt);
	DP_TRACE_STATS(FATAL, "SGI ="
			" 0.8us %d"
			" 0.4us %d"
			" 1.6us %d"
			" 3.2us %d",
			peer->stats.rx.sgi_count[0],
			peer->stats.rx.sgi_count[1],
			peer->stats.rx.sgi_count[2],
			peer->stats.rx.sgi_count[3]);
	DP_TRACE_STATS(FATAL, "BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
	DP_TRACE_STATS(FATAL, "Reception Type ="
			" SU %d,"
			" MU_MIMO %d,"
			" MU_OFDMA %d,"
			" MU_OFDMA_MIMO %d",
			peer->stats.rx.reception_type[0],
			peer->stats.rx.reception_type[1],
			peer->stats.rx.reception_type[2],
			peer->stats.rx.reception_type[3]);

	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&rx_mcs[pkt_type][index],
					DP_MCS_LENGTH - index,
					" %d ",
					peer->stats.rx.pkt_type[pkt_type].
					mcs_count[i]);
		}
	}
	DP_TRACE_STATS(FATAL, "11A MCS(0-7) = %s", rx_mcs[0]);
	DP_TRACE_STATS(FATAL, "11A MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11B MCS(0-6) = %s", rx_mcs[1]);
	DP_TRACE_STATS(FATAL, "11B MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11N MCS(0-7) = %s", rx_mcs[2]);
	DP_TRACE_STATS(FATAL, "11N MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AC MCS(0-9) = %s", rx_mcs[3]);
	DP_TRACE_STATS(FATAL, "11AC MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(FATAL, "11AX MCS(0-11) = %s", rx_mcs[4]);
	DP_TRACE_STATS(FATAL, "11AX MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	index = 0;
	for (i = 0; i < SS_COUNT; i++) {
		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
				" %d", peer->stats.rx.nss[i]);
	}
	DP_TRACE_STATS(FATAL, "NSS(0-7) = %s\n", nss);

	DP_TRACE_STATS(FATAL, "Aggregation:\n");
	DP_TRACE_STATS(FATAL, "Number of Msdu's Part of Ampdu = %d",
			peer->stats.rx.ampdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdu's With No Mpdu Level Aggregation = %d",
			peer->stats.rx.non_ampdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdu's Part of Amsdu = %d",
			peer->stats.rx.amsdu_cnt);
	DP_TRACE_STATS(FATAL, "Number of Msdu's With No Msdu Level Aggregation = %d",
			peer->stats.rx.non_amsdu_cnt);
}
/**
 * dp_print_host_stats() - Function to print the stats aggregated at host
 * @vdev_handle: DP_VDEV handle
 * @type: host stats type
 *
 * Available Stat types
 * TXRX_CLEAR_STATS  : Clear the stats
 * TXRX_RX_RATE_STATS: Print Rx Rate Info
 * TXRX_TX_RATE_STATS: Print Tx Rate Info
 * TXRX_TX_HOST_STATS: Print Tx Stats
 * TXRX_RX_HOST_STATS: Print Rx Stats
 *
 * Return: 0 on success, print error message in case of failure
 */
static int
dp_print_host_stats(struct cdp_vdev *vdev_handle, enum cdp_host_txrx_stats type)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev);
		break;
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	default:
		DP_TRACE(FATAL, "Wrong Input For TxRx Host Stats");
		break;
	}
	return 0;
}
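
/*
 * Example (illustrative only): a control-path caller would invoke this
 * through the cdp vdev handle it already holds, e.g.
 *
 *	dp_print_host_stats((struct cdp_vdev *)vdev, TXRX_RX_RATE_STATS);
 *
 * which aggregates pdev stats first and then dispatches to
 * dp_print_rx_rates() above.
 */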
/*
 * dp_get_host_peer_stats()- function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 *
 * Return: void
 */
static void
dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
{
	struct dp_peer *peer;
	uint8_t local_id;

	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
			&local_id);
	if (!peer) {
		DP_TRACE(FATAL, "Peer not found for given mac address");
		return;
	}

	dp_print_peer_stats(peer);
}
/*
 * dp_get_fw_peer_stats()- function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 * @cap: Type of htt stats requested
 *
 * Currently supports only MAC-ID-based requests, where @cap is one of:
 * 1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
 * 2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
 * 3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
 *
 * Return: void
 */
static void
dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
		uint32_t cap)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	uint32_t config_param0 = 0;
	uint32_t config_param1 = 0;
	uint32_t config_param2 = 0;
	uint32_t config_param3 = 0;

	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
	config_param0 |= (1 << (cap + 1));
	config_param1 = 0x8f;

	config_param2 |= (mac_addr[0] & 0x000000ff);
	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
	config_param3 |= (mac_addr[4] & 0x000000ff);
	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);

	dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
			config_param0, config_param1, config_param2,
			config_param3);
}
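
/*
 * Worked example (illustrative only): for peer MAC 00:11:22:33:44:55 the
 * packing above yields
 *
 *	config_param2 = 0x33221100;   // mac[3]..mac[0]
 *	config_param3 = 0x00005544;   // mac[5], mac[4]
 *
 * and cap = 1 (HTT_PEER_STATS_REQ_MODE_NO_QUERY) sets bit 2 of
 * config_param0 via (1 << (cap + 1)), alongside the IS_MAC_ADDR flag.
 */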
/*
 * dp_set_vdev_param() - function to set parameters in vdev
 * @vdev_handle: DP_VDEV handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	switch (param) {
	case CDP_ENABLE_WDS:
		vdev->wds_enabled = val;
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val;
		break;
	default:
		break;
	}
	dp_tx_vdev_update_search_flags(vdev);
}
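
/*
 * Example (illustrative only): enabling WDS on a vdev through this
 * setter also refreshes the Tx search flags, since the search policy
 * depends on the WDS state:
 *
 *	dp_set_vdev_param((struct cdp_vdev *)vdev, CDP_ENABLE_WDS, 1);
 */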
/**
 * dp_peer_set_nawds() - set nawds bit in peer
 * @peer_handle: pointer to peer
 * @value: enable/disable nawds
 *
 * Return: void
 */
static void dp_peer_set_nawds(void *peer_handle, uint8_t value)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	peer->nawds_enabled = value;
}
/*
 * dp_set_vdev_dscp_tid_map_wifi3() - Update Map ID selected for particular vdev
 * @vdev_handle: DP_VDEV handle
 * @map_id: ID of map that needs to be updated
 *
 * Return: void
 */
static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
		uint8_t map_id)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->dscp_tid_map_id = map_id;
}
/**
 * dp_set_pdev_dscp_tid_map_wifi3() - update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map
 * @tid: tid value passed by the user
 *
 * Return: void
 */
static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
		uint8_t map_id, uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	pdev->dscp_tid_map[map_id][dscp] = tid;
	hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
			map_id, dscp);
}
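
/*
 * Worked example (illustrative; assumes DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f): an IPv4 TOS byte of 0xb8 (DSCP 46, EF)
 * extracts as
 *
 *	dscp = (0xb8 >> 2) & 0x3f;   // == 0x2e == 46
 *
 * so a call such as
 *
 *	dp_set_pdev_dscp_tid_map_wifi3(pdev_handle, 0, 0xb8, 6);
 *
 * steers EF-marked traffic on map 0 to TID 6 in both the host map and
 * the HW register via hal_tx_update_dscp_tid().
 */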
/**
 * dp_fw_stats_process() - Process TxRX FW stats request
 * @vdev_handle: DP VDEV handle
 * @val: value passed by user
 *
 * Return: int
 */
static int dp_fw_stats_process(struct cdp_vdev *vdev_handle, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		DP_TRACE(NONE, "VDEV not found");
		return 1;
	}

	pdev = vdev->pdev;
	return dp_h2t_ext_stats_msg_send(pdev, val, 0, 0, 0, 0);
}
/*
 * dp_txrx_stats() - function to map to firmware and host stats
 * @vdev: virtual handle
 * @stats: type of statistics requested
 *
 * Return: integer
 */
static int dp_txrx_stats(struct cdp_vdev *vdev, enum cdp_stats stats)
{
	int host_stats;
	int fw_stats;

	if (stats >= CDP_TXRX_MAX_STATS)
		return 0;

	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 * has to be updated when new FW HTT stats are added
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;

	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"stats: %u fw_stats_type: %d host_stats_type: %d",
			stats, fw_stats, host_stats);

	if (fw_stats != TXRX_FW_STATS_INVALID)
		return dp_fw_stats_process(vdev, fw_stats);

	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
			(host_stats <= TXRX_HOST_STATS_MAX))
		return dp_print_host_stats(vdev, host_stats);
	else
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"Wrong Input for TxRx Stats");

	return 0;
}
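
/*
 * Worked remap example (hypothetical values, for illustration only):
 * with DP_HTT_DBG_EXT_STATS_MAX == 256 and DP_CURR_FW_STATS_AVAIL == 19,
 * a host-side stats id of 260 would be remapped to 260 + 19 - 256 = 23.
 * HTT ids that are defined but not yet hooked up are skipped this way,
 * so the compact dp_stats_mapping_table stays densely indexed.
 */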
/*
 * dp_print_per_ring_stats(): Packet count per ring
 * @soc - soc handle
 */
static void dp_print_per_ring_stats(struct dp_soc *soc)
{
	uint8_t core, ring;
	uint64_t total_packets;

	DP_TRACE(FATAL, "Reo packets per ring:");
	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
		total_packets = 0;
		DP_TRACE(FATAL, "Packets on ring %u:", ring);
		for (core = 0; core < NR_CPUS; core++) {
			DP_TRACE(FATAL, "Packets arriving on core %u: %llu",
					core, soc->stats.rx.ring_packets[core][ring]);
			total_packets += soc->stats.rx.ring_packets[core][ring];
		}
		DP_TRACE(FATAL, "Total packets on ring %u: %llu",
				ring, total_packets);
	}
}
/*
 * dp_txrx_path_stats() - Function to display Tx/Rx path stats
 * @soc - soc handle
 *
 * Return: none
 */
static void dp_txrx_path_stats(struct dp_soc *soc)
{
	uint8_t error_code;
	uint8_t loop_pdev;
	struct dp_pdev *pdev;
	uint8_t i;

	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {
		pdev = soc->pdev_list[loop_pdev];
		dp_aggregate_pdev_stats(pdev);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"Tx path Statistics:");
		DP_TRACE(FATAL, "from stack: %u msdus (%u bytes)",
				pdev->stats.tx_i.rcvd.num,
				pdev->stats.tx_i.rcvd.bytes);
		DP_TRACE(FATAL, "processed from host: %u msdus (%u bytes)",
				pdev->stats.tx_i.processed.num,
				pdev->stats.tx_i.processed.bytes);
		DP_TRACE(FATAL, "successfully transmitted: %u msdus (%u bytes)",
				pdev->stats.tx.tx_success.num,
				pdev->stats.tx.tx_success.bytes);

		DP_TRACE(FATAL, "Dropped in host:");
		DP_TRACE(FATAL, "Total packets dropped: %u,",
				pdev->stats.tx_i.dropped.dropped_pkt.num);
		DP_TRACE(FATAL, "Descriptor not available: %u",
				pdev->stats.tx_i.dropped.desc_na);
		DP_TRACE(FATAL, "Ring full: %u",
				pdev->stats.tx_i.dropped.ring_full);
		DP_TRACE(FATAL, "Enqueue fail: %u",
				pdev->stats.tx_i.dropped.enqueue_fail);
		DP_TRACE(FATAL, "DMA Error: %u",
				pdev->stats.tx_i.dropped.dma_error);

		DP_TRACE(FATAL, "Dropped in hardware:");
		DP_TRACE(FATAL, "total packets dropped: %u",
				pdev->stats.tx.tx_failed);
		DP_TRACE(FATAL, "mpdu age out: %u",
				pdev->stats.tx.dropped.mpdu_age_out);
		DP_TRACE(FATAL, "firmware discard reason1: %u",
				pdev->stats.tx.dropped.fw_discard_reason1);
		DP_TRACE(FATAL, "firmware discard reason2: %u",
				pdev->stats.tx.dropped.fw_discard_reason2);
		DP_TRACE(FATAL, "firmware discard reason3: %u",
				pdev->stats.tx.dropped.fw_discard_reason3);
		DP_TRACE(FATAL, "peer_invalid: %u",
				pdev->soc->stats.tx.tx_invalid_peer.num);

		DP_TRACE(FATAL, "Tx packets sent per interrupt:");
		DP_TRACE(FATAL, "Single Packet: %u",
				pdev->stats.tx_comp_histogram.pkts_1);
		DP_TRACE(FATAL, "2-20 Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_2_20);
		DP_TRACE(FATAL, "21-40 Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_21_40);
		DP_TRACE(FATAL, "41-60 Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_41_60);
		DP_TRACE(FATAL, "61-80 Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_61_80);
		DP_TRACE(FATAL, "81-100 Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_81_100);
		DP_TRACE(FATAL, "101-200 Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_101_200);
		DP_TRACE(FATAL, "201+ Packets: %u",
				pdev->stats.tx_comp_histogram.pkts_201_plus);

		DP_TRACE(FATAL, "Rx path statistics:");
		DP_TRACE(FATAL, "delivered %u msdus (%u bytes),",
				pdev->stats.rx.to_stack.num,
				pdev->stats.rx.to_stack.bytes);
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_TRACE(FATAL, "received on reo[%d] %u msdus (%u bytes),",
					i, pdev->stats.rx.rcvd_reo[i].num,
					pdev->stats.rx.rcvd_reo[i].bytes);
		DP_TRACE(FATAL, "intra-bss packets %u msdus (%u bytes),",
				pdev->stats.rx.intra_bss.pkts.num,
				pdev->stats.rx.intra_bss.pkts.bytes);
		DP_TRACE(FATAL, "raw packets %u msdus (%u bytes),",
				pdev->stats.rx.raw.num,
				pdev->stats.rx.raw.bytes);
		DP_TRACE(FATAL, "dropped: mic errors %u msdus",
				pdev->stats.rx.err.mic_err);
		DP_TRACE(FATAL, "peer invalid %u",
				pdev->soc->stats.rx.err.rx_invalid_peer.num);

		DP_TRACE(FATAL, "Reo Statistics");
		DP_TRACE(FATAL, "rbm error: %u msdus",
				pdev->soc->stats.rx.err.invalid_rbm);
		DP_TRACE(FATAL, "hal ring access fail: %u msdus",
				pdev->soc->stats.rx.err.hal_ring_access_fail);
		DP_TRACE(FATAL, "Reo errors");
		for (error_code = 0; error_code < REO_ERROR_TYPE_MAX;
				error_code++) {
			DP_TRACE(FATAL, "Reo error number (%u): %u msdus",
					error_code,
					pdev->soc->stats.rx.err.reo_error[error_code]);
		}
		for (error_code = 0; error_code < MAX_RXDMA_ERRORS;
				error_code++) {
			DP_TRACE(FATAL, "Rxdma error number (%u): %u msdus",
					error_code,
					pdev->soc->stats.rx.err.rxdma_error[error_code]);
		}

		DP_TRACE(FATAL, "Rx packets reaped per interrupt:");
		DP_TRACE(FATAL, "Single Packet: %u",
				pdev->stats.rx_ind_histogram.pkts_1);
		DP_TRACE(FATAL, "2-20 Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_2_20);
		DP_TRACE(FATAL, "21-40 Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_21_40);
		DP_TRACE(FATAL, "41-60 Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_41_60);
		DP_TRACE(FATAL, "61-80 Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_61_80);
		DP_TRACE(FATAL, "81-100 Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_81_100);
		DP_TRACE(FATAL, "101-200 Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_101_200);
		DP_TRACE(FATAL, "201+ Packets: %u",
				pdev->stats.rx_ind_histogram.pkts_201_plus);
	}
}
/*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc - DP soc handle
 * @value - Statistics option
 */
static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value)
{
	struct dp_soc *soc = (struct dp_soc *)psoc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: soc is NULL", __func__);
		return QDF_STATUS_E_INVAL;
	}

	switch (value) {
	case CDP_TXRX_PATH_STATS:
		dp_txrx_path_stats(soc);
		break;
	case CDP_RX_RING_STATS:
		dp_print_per_ring_stats(soc);
		break;
	case CDP_TXRX_TSO_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;
	case CDP_DUMP_TX_FLOW_POOL_INFO:
		/* TODO: NOT IMPLEMENTED */
		break;
	case CDP_TXRX_DESC_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;
	default:
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
};

static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
	.txrx_peer_teardown = NULL,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get APIs for dscp-tid need to be added */
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_stats = dp_txrx_stats,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.display_stats = dp_txrx_dump_stats,
	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
	.txrx_intr_attach = dp_soc_interrupt_attach,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	/* TODO: Add other functions */
};
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	/* TODO: Add other functions */
};

static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = NULL,
	.txrx_monitor_get_filter_mcast_data = NULL,
	.txrx_monitor_get_filter_non_data = NULL,
	.txrx_reset_monitor_mode = NULL,
};

static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef CONFIG_WIN
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */

#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
	.get_opmode = dp_get_opmode,
};

static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_ipa_ops dp_ops_ipa = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/**
 * dp_dummy_bus_suspend() - dummy bus suspend op
 *
 * FIXME - This is a placeholder for the actual logic!
 *
 * Return: QDF_STATUS_SUCCESS
 */
inline QDF_STATUS dp_dummy_bus_suspend(void)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_dummy_bus_resume() - dummy bus resume
 *
 * FIXME - This is a placeholder for the actual logic!
 *
 * Return: QDF_STATUS_SUCCESS
 */
inline QDF_STATUS dp_dummy_bus_resume(void)
{
	return QDF_STATUS_SUCCESS;
}

static struct cdp_bus_ops dp_ops_bus = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
	.bus_suspend = dp_dummy_bus_suspend,
	.bus_resume = dp_dummy_bus_resume
};

static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
	.last_assoc_received = dp_get_last_assoc_received,
	.last_disassoc_received = dp_get_last_disassoc_received,
	.last_deauth_received = dp_get_last_deauth_received,
};
#endif
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.ipa_ops = &dp_ops_ipa,
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
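
/*
 * Example (illustrative only): upper layers do not call the dp_*
 * functions directly; they dispatch through this ops table, which is
 * attached to the cdp soc in dp_soc_attach_wifi3() below, along the
 * lines of
 *
 *	struct dp_soc *soc = ...;
 *
 *	soc->cdp_soc.ops->cmn_drv_ops->txrx_stats(vdev_handle, stats_id);
 *	soc->cdp_soc.ops->host_stats_ops->txrx_per_peer_stats(pdev_handle,
 *			mac_addr);
 */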
/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @osif_soc: Opaque SOC handle from OSIF/HDD
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload operations
 * @psoc: Object manager psoc handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
/*
 * Local prototype added to temporarily address warning caused by
 * -Wmissing-prototypes. A more correct solution, namely to expose
 * a prototype in an appropriate header file, will come later.
 */
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops, struct wlan_objmgr_psoc *psoc);

void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops, struct wlan_objmgr_psoc *psoc)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP SOC memory allocation failed"));
		goto fail0;
	}

	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->osif_soc = osif_soc;
	soc->osdev = qdf_osdev;
	soc->hif_handle = hif_handle;
	soc->psoc = psoc;

	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, osif_soc, htc_handle,
			soc->hal_soc, qdf_osdev);
	if (!soc->htt_handle) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("HTT attach failed"));
		goto fail1;
	}

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
	if (!soc->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("wlan_cfg_soc_attach failed"));
		goto fail2;
	}

	qdf_spinlock_create(&soc->peer_ref_mutex);
	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	return (void *)soc;

fail2:
	htt_soc_detach(soc->htt_handle);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
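
/*
 * Example attach sequence (illustrative only; the real call sites live
 * in the OS shim layers):
 *
 *	void *dp_soc = dp_soc_attach_wifi3(osif_soc, hif_handle,
 *			htc_handle, qdf_dev, ol_ops, psoc);
 *	if (!dp_soc)
 *		return QDF_STATUS_E_NOMEM;
 *
 * On failure, every partially-initialized resource is unwound through
 * the fail2/fail1/fail0 labels in reverse order of acquisition.
 */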