dp_main.c

  1. /*
  2. * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <qdf_types.h>
  19. #include <qdf_lock.h>
  20. #include <qdf_net_types.h>
  21. #include <qdf_lro.h>
  22. #include <hal_api.h>
  23. #include <hif.h>
  24. #include <htt.h>
  25. #include <wdi_event.h>
  26. #include <queue.h>
  27. #include "dp_htt.h"
  28. #include "dp_types.h"
  29. #include "dp_internal.h"
  30. #include "dp_tx.h"
  31. #include "dp_rx.h"
  32. #include <cdp_txrx_handle.h>
  33. #include <wlan_cfg.h>
  34. #include "cdp_txrx_cmn_struct.h"
  35. #include <qdf_util.h>
  36. #include "dp_peer.h"
  37. #include "dp_rx_mon.h"
  38. #define DP_INTR_POLL_TIMER_MS 10
  39. #define DP_MCS_LENGTH (6*MAX_MCS)
  40. #define DP_NSS_LENGTH (6*SS_COUNT)
  41. #define DP_RXDMA_ERR_LENGTH (6*MAX_RXDMA_ERRORS)
  42. #define DP_REO_ERR_LENGTH (6*REO_ERROR_TYPE_MAX)
  43. /**
  44. * default_dscp_tid_map - Default DSCP-TID mapping
  45. *
  46. * DSCP TID AC
  47. * 000000 0 WME_AC_BE
  48. * 001000 1 WME_AC_BK
  49. * 010000 1 WME_AC_BK
  50. * 011000 0 WME_AC_BE
  51. * 100000 5 WME_AC_VI
  52. * 101000 5 WME_AC_VI
  53. * 110000 6 WME_AC_VO
  54. * 111000 6 WME_AC_VO
  55. */
  56. static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
  57. 0, 0, 0, 0, 0, 0, 0, 0,
  58. 1, 1, 1, 1, 1, 1, 1, 1,
  59. 1, 1, 1, 1, 1, 1, 1, 1,
  60. 0, 0, 0, 0, 0, 0, 0, 0,
  61. 5, 5, 5, 5, 5, 5, 5, 5,
  62. 5, 5, 5, 5, 5, 5, 5, 5,
  63. 6, 6, 6, 6, 6, 6, 6, 6,
  64. 6, 6, 6, 6, 6, 6, 6, 6,
  65. };
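/* Illustrative lookup (note added for clarity): a frame marked with DSCP 46
 * (0b101110, Expedited Forwarding) indexes entry 46 of this table and is
 * assigned TID 5, which the mapping above places in WME_AC_VI.
 */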
  66. /**
  67. * @brief Select the type of statistics
  68. */
  69. enum dp_stats_type {
  70. STATS_FW = 0,
  71. STATS_HOST = 1,
  72. STATS_TYPE_MAX = 2,
  73. };
  74. /**
  75. * @brief General Firmware statistics options
  76. *
  77. */
  78. enum dp_fw_stats {
  79. TXRX_FW_STATS_INVALID = -1,
  80. };
  81. /**
  82. * @brief Firmware and Host statistics
  83. * currently supported
  84. */
  85. const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
  86. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  87. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  88. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  89. {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
  90. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  91. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  92. {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
  93. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  94. {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
  95. {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
  96. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  97. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  98. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  99. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  100. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  101. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  102. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  103. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  104. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  105. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  106. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  107. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  108. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  109. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  110. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  111. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  112. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  113. {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
  114. };
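/* Note added for clarity: each row corresponds to one CDP stats request value;
 * the STATS_FW column selects the firmware stat to issue and the STATS_HOST
 * column the host stat, with the *_INVALID entries marking combinations that
 * are not supported for that request value.
 */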
  115. /**
  116. * dp_srng_setup - Internal function to setup SRNG rings used by data path
  117. */
  118. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  119. int ring_type, int ring_num, int mac_id, uint32_t num_entries)
  120. {
  121. void *hal_soc = soc->hal_soc;
  122. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  123. /* TODO: See if we should get align size from hal */
  124. uint32_t ring_base_align = 8;
  125. struct hal_srng_params ring_params;
  126. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  127. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  128. srng->hal_srng = NULL;
  129. srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
  130. srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
  131. soc->osdev, soc->osdev->dev, srng->alloc_size,
  132. &(srng->base_paddr_unaligned));
  133. if (!srng->base_vaddr_unaligned) {
  134. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  135. FL("alloc failed - ring_type: %d, ring_num %d"),
  136. ring_type, ring_num);
  137. return QDF_STATUS_E_NOMEM;
  138. }
  139. ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
  140. ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
  141. ring_params.ring_base_paddr = srng->base_paddr_unaligned +
  142. ((unsigned long)(ring_params.ring_base_vaddr) -
  143. (unsigned long)srng->base_vaddr_unaligned);
  144. ring_params.num_entries = num_entries;
  145. /* TODO: Check MSI support and get MSI settings from HIF layer */
  146. ring_params.msi_data = 0;
  147. ring_params.msi_addr = 0;
  148. /* TODO: Setup interrupt timer and batch counter thresholds for
  149. * interrupt mitigation based on ring type
  150. */
  151. ring_params.intr_timer_thres_us = 8;
  152. ring_params.intr_batch_cntr_thres_entries = 1;
  153. /* TODO: Currently hal layer takes care of endianness related settings.
  154. * See if these settings need to passed from DP layer
  155. */
  156. ring_params.flags = 0;
  157. /* Enable low threshold interrupts for rx buffer rings (regular and
  158. * monitor buffer rings.
  159. * TODO: See if this is required for any other ring
  160. */
  161. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF)) {
  162. /* TODO: Setting low threshold to 1/8th of ring size
  163. * see if this needs to be configurable
  164. */
  165. ring_params.low_threshold = num_entries >> 3;
  166. ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  167. }
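/* Illustrative: for a 1024-entry monitor buffer ring the block above sets
 * low_threshold to 1024 >> 3 = 128 entries.
 */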
  168. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  169. mac_id, &ring_params);
  170. return 0;
  171. }
  172. /**
  173. * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
  174. * Any buffers allocated and attached to ring entries are expected to be freed
  175. * before calling this function.
  176. */
  177. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  178. int ring_type, int ring_num)
  179. {
  180. if (!srng->hal_srng) {
  181. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  182. FL("Ring type: %d, num:%d not setup"),
  183. ring_type, ring_num);
  184. return;
  185. }
  186. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  187. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  188. srng->alloc_size,
  189. srng->base_vaddr_unaligned,
  190. srng->base_paddr_unaligned, 0);
  191. }
  192. /* TODO: Need this interface from HIF */
  193. void *hif_get_hal_handle(void *hif_handle);
  194. /*
  195. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
  196. * @dp_ctx: DP interrupt context handle
  197. * @dp_budget: Number of frames/descriptors that can be processed in one shot
  198. *
  199. * Return: amount of the budget consumed (work done) for the soc device
  200. */
  201. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  202. {
  203. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  204. struct dp_soc *soc = int_ctx->soc;
  205. int ring = 0;
  206. uint32_t work_done = 0;
  207. uint32_t budget = dp_budget;
  208. uint8_t tx_mask = int_ctx->tx_ring_mask;
  209. uint8_t rx_mask = int_ctx->rx_ring_mask;
  210. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  211. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  212. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  213. /* Process Tx completion interrupts first to return back buffers */
  214. if (tx_mask) {
  215. for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
  216. if (tx_mask & (1 << ring)) {
  217. work_done =
  218. dp_tx_comp_handler(soc, ring, budget);
  219. budget -= work_done;
  220. if (work_done)
  221. QDF_TRACE(QDF_MODULE_ID_DP,
  222. QDF_TRACE_LEVEL_INFO,
  223. "tx mask 0x%x ring %d, "
  224. "budget %d",
  225. tx_mask, ring, budget);
  226. if (budget <= 0)
  227. goto budget_done;
  228. }
  229. }
  230. }
  231. /* Process REO Exception ring interrupt */
  232. if (rx_err_mask) {
  233. work_done = dp_rx_err_process(soc,
  234. soc->reo_exception_ring.hal_srng, budget);
  235. budget -= work_done;
  236. if (work_done)
  237. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  238. "REO Exception Ring: work_done %d budget %d",
  239. work_done, budget);
  240. if (budget <= 0) {
  241. goto budget_done;
  242. }
  243. }
  244. /* Process Rx WBM release ring interrupt */
  245. if (rx_wbm_rel_mask) {
  246. work_done = dp_rx_wbm_err_process(soc,
  247. soc->rx_rel_ring.hal_srng, budget);
  248. budget -= work_done;
  249. if (work_done)
  250. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  251. "WBM Release Ring: work_done %d budget %d",
  252. work_done, budget);
  253. if (budget <= 0) {
  254. goto budget_done;
  255. }
  256. }
  257. /* Process Rx interrupts */
  258. if (rx_mask) {
  259. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  260. if (rx_mask & (1 << ring)) {
  261. work_done =
  262. dp_rx_process(int_ctx,
  263. soc->reo_dest_ring[ring].hal_srng,
  264. budget);
  265. budget -= work_done;
  266. if (work_done)
  267. QDF_TRACE(QDF_MODULE_ID_DP,
  268. QDF_TRACE_LEVEL_INFO,
  269. "rx mask 0x%x ring %d, "
  270. "budget %d",
  271. rx_mask, ring, budget);
  272. if (budget <= 0)
  273. goto budget_done;
  274. }
  275. }
  276. }
  277. if (reo_status_mask)
  278. dp_reo_status_ring_handler(soc);
  279. /* Process Rx monitor interrupts */
  280. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  281. if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
  282. work_done =
  283. dp_mon_process(soc, ring, budget);
  284. budget -= work_done;
  285. }
  286. }
  287. qdf_lro_flush(int_ctx->lro_ctx);
  288. budget_done:
  289. return dp_budget - budget;
  290. }
  291. /* dp_interrupt_timer() - timer poll for interrupts
  292. *
  293. * @arg: SoC Handle
  294. *
  295. * Return: void
  296. *
  297. */
  298. #ifdef DP_INTR_POLL_BASED
  299. static void dp_interrupt_timer(void *arg)
  300. {
  301. struct dp_soc *soc = (struct dp_soc *) arg;
  302. int i;
  303. if (qdf_atomic_read(&soc->cmn_init_done)) {
  304. for (i = 0;
  305. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  306. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  307. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  308. }
  309. }
  310. /*
  311. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  312. * @txrx_soc: DP SOC handle
  313. *
  314. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
  315. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
  316. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  317. *
  318. * Return: 0 for success. nonzero for failure.
  319. */
  320. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  321. {
  322. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  323. int i;
  324. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  325. soc->intr_ctx[i].tx_ring_mask = 0xF;
  326. soc->intr_ctx[i].rx_ring_mask = 0xF;
  327. soc->intr_ctx[i].rx_mon_ring_mask = 0x1;
  328. soc->intr_ctx[i].rx_err_ring_mask = 0x1;
  329. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0x1;
  330. soc->intr_ctx[i].reo_status_ring_mask = 0x1;
  331. soc->intr_ctx[i].soc = soc;
  332. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  333. }
  334. qdf_timer_init(soc->osdev, &soc->int_timer,
  335. dp_interrupt_timer, (void *)soc,
  336. QDF_TIMER_TYPE_WAKE_APPS);
  337. return QDF_STATUS_SUCCESS;
  338. }
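/* Note added for clarity: in this poll-based variant every interrupt context
 * is given the full set of ring masks and no IRQs are registered; once the
 * timer is started, dp_interrupt_timer() fires every DP_INTR_POLL_TIMER_MS
 * (10 ms) and drives dp_service_srngs() for each context instead of a
 * hardware interrupt.
 */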
  339. /*
  340. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  341. * @txrx_soc: DP SOC handle
  342. *
  343. * Return: void
  344. */
  345. static void dp_soc_interrupt_detach(void *txrx_soc)
  346. {
  347. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  348. qdf_timer_stop(&soc->int_timer);
  349. qdf_timer_free(&soc->int_timer);
  350. }
  351. #else
  352. /*
  353. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  354. * @txrx_soc: DP SOC handle
  355. *
  356. * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
  357. * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask and
  358. * rx_monitor_ring mask to indicate the rings that are processed by the handler.
  359. *
  360. * Return: 0 for success. nonzero for failure.
  361. */
  362. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  363. {
  364. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  365. int i = 0;
  366. int num_irq = 0;
  367. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  368. int j = 0;
  369. int ret = 0;
  370. /* Map of IRQ ids registered with one interrupt context */
  371. int irq_id_map[HIF_MAX_GRP_IRQ];
  372. int tx_mask =
  373. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  374. int rx_mask =
  375. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  376. int rx_mon_mask =
  377. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  378. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  379. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  380. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  381. soc->intr_ctx[i].soc = soc;
  382. num_irq = 0;
  383. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  384. if (tx_mask & (1 << j)) {
  385. irq_id_map[num_irq++] =
  386. (wbm2host_tx_completions_ring1 - j);
  387. }
  388. if (rx_mask & (1 << j)) {
  389. irq_id_map[num_irq++] =
  390. (reo2host_destination_ring1 - j);
  391. }
  392. if (rx_mon_mask & (1 << j)) {
  393. irq_id_map[num_irq++] =
  394. (rxdma2host_monitor_destination_mac1
  395. - j);
  396. }
  397. }
  398. ret = hif_register_ext_group_int_handler(soc->hif_handle,
  399. num_irq, irq_id_map,
  400. dp_service_srngs,
  401. &soc->intr_ctx[i]);
  402. if (ret) {
  403. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  404. FL("failed, ret = %d"), ret);
  405. return QDF_STATUS_E_FAILURE;
  406. }
  407. soc->intr_ctx[i].lro_ctx = qdf_lro_init();
  408. }
  409. hif_configure_ext_group_interrupts(soc->hif_handle);
  410. return QDF_STATUS_SUCCESS;
  411. }
  412. /*
  413. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  414. * @txrx_soc: DP SOC handle
  415. *
  416. * Return: void
  417. */
  418. static void dp_soc_interrupt_detach(void *txrx_soc)
  419. {
  420. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  421. int i;
  422. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  423. soc->intr_ctx[i].tx_ring_mask = 0;
  424. soc->intr_ctx[i].rx_ring_mask = 0;
  425. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  426. qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
  427. }
  428. }
  429. #endif
  430. #define AVG_MAX_MPDUS_PER_TID 128
  431. #define AVG_TIDS_PER_CLIENT 2
  432. #define AVG_FLOWS_PER_TID 2
  433. #define AVG_MSDUS_PER_FLOW 128
  434. #define AVG_MSDUS_PER_MPDU 4
  435. /*
  436. * Allocate and setup link descriptor pool that will be used by HW for
  437. * various link and queue descriptors and managed by WBM
  438. */
  439. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  440. {
  441. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  442. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  443. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  444. uint32_t num_mpdus_per_link_desc =
  445. hal_num_mpdus_per_link_desc(soc->hal_soc);
  446. uint32_t num_msdus_per_link_desc =
  447. hal_num_msdus_per_link_desc(soc->hal_soc);
  448. uint32_t num_mpdu_links_per_queue_desc =
  449. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  450. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  451. uint32_t total_link_descs, total_mem_size;
  452. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  453. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  454. uint32_t num_link_desc_banks;
  455. uint32_t last_bank_size = 0;
  456. uint32_t entry_size, num_entries;
  457. int i;
  458. /* Only Tx queue descriptors are allocated from the common link descriptor
  459. * pool. Rx queue descriptors (REO queue extension descriptors) are not
  460. * included here because they are expected to be allocated contiguously
  461. * with the REO queue descriptors.
  462. */
  463. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  464. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  465. num_mpdu_queue_descs = num_mpdu_link_descs /
  466. num_mpdu_links_per_queue_desc;
  467. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  468. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  469. num_msdus_per_link_desc;
  470. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  471. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / num_msdus_per_link_desc;
  472. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  473. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  474. /* Round up to power of 2 */
  475. total_link_descs = 1;
  476. while (total_link_descs < num_entries)
  477. total_link_descs <<= 1;
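/* Illustrative sizing (hypothetical values): with max_clients = 64 and
 * assuming hal_num_mpdus_per_link_desc() = 6, hal_num_msdus_per_link_desc()
 * = 6 and hal_num_mpdu_links_per_queue_desc() = 8, num_entries comes to
 * 2730 + 341 + 5461 + 10922 = 19454, which the loop above rounds up to a
 * total_link_descs of 32768.
 */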
  478. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  479. FL("total_link_descs: %u, link_desc_size: %d"),
  480. total_link_descs, link_desc_size);
  481. total_mem_size = total_link_descs * link_desc_size;
  482. total_mem_size += link_desc_align;
  483. if (total_mem_size <= max_alloc_size) {
  484. num_link_desc_banks = 0;
  485. last_bank_size = total_mem_size;
  486. } else {
  487. num_link_desc_banks = (total_mem_size) /
  488. (max_alloc_size - link_desc_align);
  489. last_bank_size = total_mem_size %
  490. (max_alloc_size - link_desc_align);
  491. }
  492. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  493. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  494. total_mem_size, num_link_desc_banks);
  495. for (i = 0; i < num_link_desc_banks; i++) {
  496. soc->link_desc_banks[i].base_vaddr_unaligned =
  497. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  498. max_alloc_size,
  499. &(soc->link_desc_banks[i].base_paddr_unaligned));
  500. soc->link_desc_banks[i].size = max_alloc_size;
  501. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  502. soc->link_desc_banks[i].base_vaddr_unaligned) +
  503. ((unsigned long)(
  504. soc->link_desc_banks[i].base_vaddr_unaligned) %
  505. link_desc_align));
  506. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  507. soc->link_desc_banks[i].base_paddr_unaligned) +
  508. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  509. (unsigned long)(
  510. soc->link_desc_banks[i].base_vaddr_unaligned));
  511. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  512. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  513. FL("Link descriptor memory alloc failed"));
  514. goto fail;
  515. }
  516. }
  517. if (last_bank_size) {
  518. /* Allocate last bank in case total memory required is not exact
  519. * multiple of max_alloc_size
  520. */
  521. soc->link_desc_banks[i].base_vaddr_unaligned =
  522. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  523. last_bank_size,
  524. &(soc->link_desc_banks[i].base_paddr_unaligned));
  525. soc->link_desc_banks[i].size = last_bank_size;
  526. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  527. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  528. ((unsigned long)(
  529. soc->link_desc_banks[i].base_vaddr_unaligned) %
  530. link_desc_align));
  531. soc->link_desc_banks[i].base_paddr =
  532. (unsigned long)(
  533. soc->link_desc_banks[i].base_paddr_unaligned) +
  534. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  535. (unsigned long)(
  536. soc->link_desc_banks[i].base_vaddr_unaligned));
  537. }
  538. /* Allocate and setup link descriptor idle list for HW internal use */
  539. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  540. total_mem_size = entry_size * total_link_descs;
  541. if (total_mem_size <= max_alloc_size) {
  542. void *desc;
  543. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  544. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  545. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  546. FL("Link desc idle ring setup failed"));
  547. goto fail;
  548. }
  549. hal_srng_access_start_unlocked(soc->hal_soc,
  550. soc->wbm_idle_link_ring.hal_srng);
  551. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  552. soc->link_desc_banks[i].base_paddr; i++) {
  553. uint32_t num_entries = (soc->link_desc_banks[i].size -
  554. (unsigned long)(
  555. soc->link_desc_banks[i].base_vaddr) -
  556. (unsigned long)(
  557. soc->link_desc_banks[i].base_vaddr_unaligned))
  558. / link_desc_size;
  559. unsigned long paddr = (unsigned long)(
  560. soc->link_desc_banks[i].base_paddr);
  561. while (num_entries && (desc = hal_srng_src_get_next(
  562. soc->hal_soc,
  563. soc->wbm_idle_link_ring.hal_srng))) {
  564. hal_set_link_desc_addr(desc, i, paddr);
  565. num_entries--;
  566. paddr += link_desc_size;
  567. }
  568. }
  569. hal_srng_access_end_unlocked(soc->hal_soc,
  570. soc->wbm_idle_link_ring.hal_srng);
  571. } else {
  572. uint32_t num_scatter_bufs;
  573. uint32_t num_entries_per_buf;
  574. uint32_t rem_entries;
  575. uint8_t *scatter_buf_ptr;
  576. uint16_t scatter_buf_num;
  577. soc->wbm_idle_scatter_buf_size =
  578. hal_idle_list_scatter_buf_size(soc->hal_soc);
  579. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  580. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  581. num_scatter_bufs = (total_mem_size /
  582. soc->wbm_idle_scatter_buf_size) + ((total_mem_size %
  583. soc->wbm_idle_scatter_buf_size) ? 1 : 0);
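/* Illustrative: if total_mem_size were 100000 bytes and the HAL reported a
 * 32768-byte scatter buffer size, this gives 3 full buffers plus one partial
 * buffer, i.e. num_scatter_bufs = 4.
 */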
  584. for (i = 0; i < num_scatter_bufs; i++) {
  585. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  586. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  587. soc->wbm_idle_scatter_buf_size,
  588. &(soc->wbm_idle_scatter_buf_base_paddr[i]));
  589. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  590. QDF_TRACE(QDF_MODULE_ID_DP,
  591. QDF_TRACE_LEVEL_ERROR,
  592. FL("Scatter list memory alloc failed"));
  593. goto fail;
  594. }
  595. }
  596. /* Populate idle list scatter buffers with link descriptor
  597. * pointers
  598. */
  599. scatter_buf_num = 0;
  600. scatter_buf_ptr = (uint8_t *)(
  601. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  602. rem_entries = num_entries_per_buf;
  603. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  604. soc->link_desc_banks[i].base_paddr; i++) {
  605. uint32_t num_link_descs =
  606. (soc->link_desc_banks[i].size -
  607. (unsigned long)(
  608. soc->link_desc_banks[i].base_vaddr) -
  609. (unsigned long)(
  610. soc->link_desc_banks[i].base_vaddr_unaligned)) /
  611. link_desc_size;
  612. unsigned long paddr = (unsigned long)(
  613. soc->link_desc_banks[i].base_paddr);
  614. void *desc = NULL;
  615. while (num_link_descs && (desc =
  616. hal_srng_src_get_next(soc->hal_soc,
  617. soc->wbm_idle_link_ring.hal_srng))) {
  618. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  619. i, paddr);
  620. num_link_descs--;
  621. paddr += link_desc_size;
  622. if (rem_entries) {
  623. rem_entries--;
  624. scatter_buf_ptr += link_desc_size;
  625. } else {
  626. rem_entries = num_entries_per_buf;
  627. scatter_buf_num++;
  628. scatter_buf_ptr = (uint8_t *)(
  629. soc->wbm_idle_scatter_buf_base_vaddr[
  630. scatter_buf_num]);
  631. }
  632. }
  633. }
  634. /* Setup link descriptor idle list in HW */
  635. hal_setup_link_idle_list(soc->hal_soc,
  636. soc->wbm_idle_scatter_buf_base_paddr,
  637. soc->wbm_idle_scatter_buf_base_vaddr,
  638. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  639. (uint32_t)(scatter_buf_ptr -
  640. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  641. scatter_buf_num])));
  642. }
  643. return 0;
  644. fail:
  645. if (soc->wbm_idle_link_ring.hal_srng) {
  646. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  647. WBM_IDLE_LINK, 0);
  648. }
  649. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  650. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  651. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  652. soc->wbm_idle_scatter_buf_size,
  653. soc->wbm_idle_scatter_buf_base_vaddr[i],
  654. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  655. }
  656. }
  657. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  658. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  659. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  660. soc->link_desc_banks[i].size,
  661. soc->link_desc_banks[i].base_vaddr_unaligned,
  662. soc->link_desc_banks[i].base_paddr_unaligned,
  663. 0);
  664. }
  665. }
  666. return QDF_STATUS_E_FAILURE;
  667. }
  668. #ifdef notused
  669. /*
  670. * Free the link descriptor pool that was set up for HW use
  671. */
  672. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  673. {
  674. int i;
  675. if (soc->wbm_idle_link_ring.hal_srng) {
  676. dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  677. WBM_IDLE_LINK, 0);
  678. }
  679. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  680. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  681. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  682. soc->wbm_idle_scatter_buf_size,
  683. soc->wbm_idle_scatter_buf_base_vaddr[i],
  684. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  685. }
  686. }
  687. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  688. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  689. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  690. soc->link_desc_banks[i].size,
  691. soc->link_desc_banks[i].base_vaddr_unaligned,
  692. soc->link_desc_banks[i].base_paddr_unaligned,
  693. 0);
  694. }
  695. }
  696. }
  697. #endif /* notused */
  698. /* TODO: Following should be configurable */
  699. #define WBM_RELEASE_RING_SIZE 64
  700. #define TCL_DATA_RING_SIZE 512
  701. #define TX_COMP_RING_SIZE 1024
  702. #define TCL_CMD_RING_SIZE 32
  703. #define TCL_STATUS_RING_SIZE 32
  704. #define REO_DST_RING_SIZE 2048
  705. #define REO_REINJECT_RING_SIZE 32
  706. #define RX_RELEASE_RING_SIZE 1024
  707. #define REO_EXCEPTION_RING_SIZE 128
  708. #define REO_CMD_RING_SIZE 32
  709. #define REO_STATUS_RING_SIZE 32
  710. #define RXDMA_BUF_RING_SIZE 1024
  711. #define RXDMA_REFILL_RING_SIZE 2048
  712. #define RXDMA_MONITOR_BUF_RING_SIZE 1024
  713. #define RXDMA_MONITOR_DST_RING_SIZE 1024
  714. #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
  715. #define RXDMA_MONITOR_DESC_RING_SIZE 1024
  716. /*
  717. * dp_soc_cmn_setup() - Common SoC level initialization
  718. * @soc: Datapath SOC handle
  719. *
  720. * This is an internal function used to setup common SOC data structures,
  721. * to be called from PDEV attach after receiving HW mode capabilities from FW
  722. */
  723. static int dp_soc_cmn_setup(struct dp_soc *soc)
  724. {
  725. int i;
  726. struct hal_reo_params reo_params;
  727. if (qdf_atomic_read(&soc->cmn_init_done))
  728. return 0;
  729. if (dp_peer_find_attach(soc))
  730. goto fail0;
  731. if (dp_hw_link_desc_pool_setup(soc))
  732. goto fail1;
  733. /* Setup SRNG rings */
  734. /* Common rings */
  735. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  736. WBM_RELEASE_RING_SIZE)) {
  737. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  738. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  739. goto fail1;
  740. }
  741. soc->num_tcl_data_rings = 0;
  742. /* Tx data rings */
  743. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  744. soc->num_tcl_data_rings =
  745. wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  746. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  747. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  748. TCL_DATA, i, 0, TCL_DATA_RING_SIZE)) {
  749. QDF_TRACE(QDF_MODULE_ID_DP,
  750. QDF_TRACE_LEVEL_ERROR,
  751. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  752. goto fail1;
  753. }
  754. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  755. WBM2SW_RELEASE, i, 0, TX_COMP_RING_SIZE)) {
  756. QDF_TRACE(QDF_MODULE_ID_DP,
  757. QDF_TRACE_LEVEL_ERROR,
  758. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  759. goto fail1;
  760. }
  761. }
  762. } else {
  763. /* This will be incremented during per pdev ring setup */
  764. soc->num_tcl_data_rings = 0;
  765. }
  766. if (dp_tx_soc_attach(soc)) {
  767. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  768. FL("dp_tx_soc_attach failed"));
  769. goto fail1;
  770. }
  771. /* TCL command and status rings */
  772. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  773. TCL_CMD_RING_SIZE)) {
  774. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  775. FL("dp_srng_setup failed for tcl_cmd_ring"));
  776. goto fail1;
  777. }
  778. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  779. TCL_STATUS_RING_SIZE)) {
  780. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  781. FL("dp_srng_setup failed for tcl_status_ring"));
  782. goto fail1;
  783. }
  784. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  785. * descriptors
  786. */
  787. /* Rx data rings */
  788. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  789. soc->num_reo_dest_rings =
  790. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  791. QDF_TRACE(QDF_MODULE_ID_DP,
  792. QDF_TRACE_LEVEL_ERROR,
  793. FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
  794. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  795. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  796. i, 0, REO_DST_RING_SIZE)) {
  797. QDF_TRACE(QDF_MODULE_ID_DP,
  798. QDF_TRACE_LEVEL_ERROR,
  799. FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
  800. goto fail1;
  801. }
  802. }
  803. } else {
  804. /* This will be incremented during per pdev ring setup */
  805. soc->num_reo_dest_rings = 0;
  806. }
  807. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  808. /* REO reinjection ring */
  809. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  810. REO_REINJECT_RING_SIZE)) {
  811. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  812. FL("dp_srng_setup failed for reo_reinject_ring"));
  813. goto fail1;
  814. }
  815. /* Rx release ring */
  816. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  817. RX_RELEASE_RING_SIZE)) {
  818. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  819. FL("dp_srng_setup failed for rx_rel_ring"));
  820. goto fail1;
  821. }
  822. /* Rx exception ring */
  823. if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
  824. MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
  825. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  826. FL("dp_srng_setup failed for reo_exception_ring"));
  827. goto fail1;
  828. }
  829. /* REO command and status rings */
  830. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  831. REO_CMD_RING_SIZE)) {
  832. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  833. FL("dp_srng_setup failed for reo_cmd_ring"));
  834. goto fail1;
  835. }
  836. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  837. TAILQ_INIT(&soc->rx.reo_cmd_list);
  838. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  839. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  840. REO_STATUS_RING_SIZE)) {
  841. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  842. FL("dp_srng_setup failed for reo_status_ring"));
  843. goto fail1;
  844. }
  845. dp_soc_interrupt_attach(soc);
  846. /* Setup HW REO */
  847. qdf_mem_zero(&reo_params, sizeof(reo_params));
  848. if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx))
  849. reo_params.rx_hash_enabled = true;
  850. hal_reo_setup(soc->hal_soc, &reo_params);
  851. qdf_atomic_set(&soc->cmn_init_done, 1);
  852. return 0;
  853. fail1:
  854. /*
  855. * Cleanup will be done as part of soc_detach, which will
  856. * be called on pdev attach failure
  857. */
  858. fail0:
  859. return QDF_STATUS_E_FAILURE;
  860. }
  861. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  862. static void dp_lro_hash_setup(struct dp_soc *soc)
  863. {
  864. struct cdp_lro_hash_config lro_hash;
  865. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  866. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  867. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  868. FL("LRO disabled RX hash disabled"));
  869. return;
  870. }
  871. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  872. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
  873. lro_hash.lro_enable = 1;
  874. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  875. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  876. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  877. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  878. }
  879. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, FL("enabled"));
  880. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  881. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  882. LRO_IPV4_SEED_ARR_SZ));
  883. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  884. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  885. LRO_IPV6_SEED_ARR_SZ));
  886. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  887. "lro_hash: lro_enable: 0x%x "
  888. "lro_hash: tcp_flag 0x%x tcp_flag_mask 0x%x",
  889. lro_hash.lro_enable, lro_hash.tcp_flag,
  890. lro_hash.tcp_flag_mask);
  891. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  892. FL("lro_hash: toeplitz_hash_ipv4:"));
  893. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  894. QDF_TRACE_LEVEL_ERROR,
  895. (void *)lro_hash.toeplitz_hash_ipv4,
  896. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  897. LRO_IPV4_SEED_ARR_SZ));
  898. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  899. FL("lro_hash: toeplitz_hash_ipv6:"));
  900. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  901. QDF_TRACE_LEVEL_ERROR,
  902. (void *)lro_hash.toeplitz_hash_ipv6,
  903. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  904. LRO_IPV6_SEED_ARR_SZ));
  905. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  906. if (soc->cdp_soc.ol_ops->lro_hash_config)
  907. (void)soc->cdp_soc.ol_ops->lro_hash_config
  908. (soc->osif_soc, &lro_hash);
  909. }
  910. /*
  911. * dp_rxdma_ring_setup() - configure the RX DMA rings
  912. * @soc: data path SoC handle
  913. * @pdev: Physical device handle
  914. *
  915. * Return: 0 - success, > 0 - failure
  916. */
  917. #ifdef QCA_HOST2FW_RXBUF_RING
  918. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  919. struct dp_pdev *pdev)
  920. {
  921. int max_mac_rings =
  922. wlan_cfg_get_num_mac_rings
  923. (pdev->wlan_cfg_ctx);
  924. int i;
  925. for (i = 0; i < max_mac_rings; i++) {
  926. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  927. "%s: pdev_id %d mac_id %d\n",
  928. __func__, pdev->pdev_id, i);
  929. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  930. RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
  931. QDF_TRACE(QDF_MODULE_ID_DP,
  932. QDF_TRACE_LEVEL_ERROR,
  933. FL("failed rx mac ring setup"));
  934. return QDF_STATUS_E_FAILURE;
  935. }
  936. }
  937. return QDF_STATUS_SUCCESS;
  938. }
  939. #else
  940. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  941. struct dp_pdev *pdev)
  942. {
  943. return QDF_STATUS_SUCCESS;
  944. }
  945. #endif
  946. /**
  947. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  948. * @pdev - DP_PDEV handle
  949. *
  950. * Return: void
  951. */
  952. static inline void
  953. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  954. {
  955. uint8_t map_id;
  956. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  957. qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
  958. sizeof(default_dscp_tid_map));
  959. }
  960. for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
  961. hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
  962. pdev->dscp_tid_map[map_id],
  963. map_id);
  964. }
  965. }
  966. /*
  967. * dp_pdev_attach_wifi3() - attach txrx pdev
  968. * @txrx_soc: Datapath SOC handle
  969. * @ctrl_pdev: Opaque PDEV handle from OSIF/HDD
  970. * @htc_handle: HTC handle for host-target interface
  971. * @qdf_osdev: QDF OS device
  972. * @pdev_id: PDEV ID
  973. *
  974. * Return: DP PDEV handle on success, NULL on failure
  975. */
  976. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  977. struct cdp_cfg *ctrl_pdev,
  978. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  979. {
  980. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  981. struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
  982. if (!pdev) {
  983. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  984. FL("DP PDEV memory allocation failed"));
  985. goto fail0;
  986. }
  987. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
  988. if (!pdev->wlan_cfg_ctx) {
  989. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  990. FL("pdev cfg_attach failed"));
  991. qdf_mem_free(pdev);
  992. goto fail0;
  993. }
  994. pdev->soc = soc;
  995. pdev->osif_pdev = ctrl_pdev;
  996. pdev->pdev_id = pdev_id;
  997. soc->pdev_list[pdev_id] = pdev;
  998. soc->pdev_count++;
  999. TAILQ_INIT(&pdev->vdev_list);
  1000. pdev->vdev_count = 0;
  1001. qdf_spinlock_create(&pdev->neighbour_peer_mutex);
  1002. TAILQ_INIT(&pdev->neighbour_peers_list);
  1003. if (dp_soc_cmn_setup(soc)) {
  1004. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1005. FL("dp_soc_cmn_setup failed"));
  1006. goto fail1;
  1007. }
  1008. /* Setup per PDEV TCL rings if configured */
  1009. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1010. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  1011. pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
  1012. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1013. FL("dp_srng_setup failed for tcl_data_ring"));
  1014. goto fail1;
  1015. }
  1016. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
  1017. WBM2SW_RELEASE, pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
  1018. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1019. FL("dp_srng_setup failed for tx_comp_ring"));
  1020. goto fail1;
  1021. }
  1022. soc->num_tcl_data_rings++;
  1023. }
  1024. /* Tx specific init */
  1025. if (dp_tx_pdev_attach(pdev)) {
  1026. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1027. FL("dp_tx_pdev_attach failed"));
  1028. goto fail1;
  1029. }
  1030. /* Setup per PDEV REO rings if configured */
  1031. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1032. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  1033. pdev_id, pdev_id, REO_DST_RING_SIZE)) {
  1034. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1035. FL("dp_srng_setup failed for reo_dest_ring"));
  1036. goto fail1;
  1037. }
  1038. soc->num_reo_dest_rings++;
  1039. }
  1040. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  1041. RXDMA_REFILL_RING_SIZE)) {
  1042. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1043. FL("dp_srng_setup failed rx refill ring"));
  1044. goto fail1;
  1045. }
  1046. if (dp_rxdma_ring_setup(soc, pdev)) {
  1047. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1048. FL("RXDMA ring config failed"));
  1049. goto fail1;
  1050. }
  1051. if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
  1052. pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
  1053. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1054. FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
  1055. goto fail1;
  1056. }
  1057. if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
  1058. pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
  1059. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1060. FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
  1061. goto fail1;
  1062. }
  1063. if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
  1064. RXDMA_MONITOR_STATUS, 0, pdev_id,
  1065. RXDMA_MONITOR_STATUS_RING_SIZE)) {
  1066. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1067. FL("dp_srng_setup failed for rxdma_mon_status_ring"));
  1068. goto fail1;
  1069. }
  1070. if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
  1071. RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
  1072. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1073. "dp_srng_setup failed for rxdma_mon_desc_ring\n");
  1074. goto fail1;
  1075. }
  1076. /* Rx specific init */
  1077. if (dp_rx_pdev_attach(pdev)) {
  1078. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1079. FL("dp_rx_pdev_attach failed "));
  1080. goto fail0;
  1081. }
  1082. DP_STATS_INIT(pdev);
  1083. #ifndef CONFIG_WIN
  1084. /* MCL */
  1085. dp_local_peer_id_pool_init(pdev);
  1086. #endif
  1087. dp_dscp_tid_map_setup(pdev);
  1088. /* Rx monitor mode specific init */
  1089. if (dp_rx_pdev_mon_attach(pdev)) {
  1090. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1091. "dp_rx_pdev_mon_attach failed\n");
  1092. goto fail0;
  1093. }
  1094. /* set the reo destination to 1 during initialization */
  1095. pdev->reo_dest = 1;
  1096. return (struct cdp_pdev *)pdev;
  1097. fail1:
  1098. dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
  1099. fail0:
  1100. return NULL;
  1101. }
  1102. /*
  1103. * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  1104. * @soc: data path SoC handle
  1105. * @pdev: Physical device handle
  1106. *
  1107. * Return: void
  1108. */
  1109. #ifdef QCA_HOST2FW_RXBUF_RING
  1110. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  1111. struct dp_pdev *pdev)
  1112. {
  1113. int max_mac_rings =
  1114. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  1115. int i;
  1116. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  1117. max_mac_rings : MAX_RX_MAC_RINGS;
  1118. for (i = 0; i < MAX_RX_MAC_RINGS; i++)
  1119. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  1120. RXDMA_BUF, 1);
  1121. }
  1122. #else
  1123. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  1124. struct dp_pdev *pdev)
  1125. {
  1126. }
  1127. #endif
  1128. /*
  1129. * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
  1130. * @pdev: device object
  1131. *
  1132. * Return: void
  1133. */
  1134. static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
  1135. {
  1136. struct dp_neighbour_peer *peer = NULL;
  1137. struct dp_neighbour_peer *temp_peer = NULL;
  1138. TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
  1139. neighbour_peer_list_elem, temp_peer) {
  1140. /* delete this peer from the list */
  1141. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  1142. peer, neighbour_peer_list_elem);
  1143. qdf_mem_free(peer);
  1144. }
  1145. qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
  1146. }
  1147. /*
  1148. * dp_pdev_detach_wifi3() - detach txrx pdev
  1149. * @txrx_pdev: Datapath PDEV handle
  1150. * @force: Force detach
  1151. *
  1152. */
  1153. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  1154. {
  1155. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  1156. struct dp_soc *soc = pdev->soc;
  1157. dp_tx_pdev_detach(pdev);
  1158. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1159. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  1160. TCL_DATA, pdev->pdev_id);
  1161. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  1162. WBM2SW_RELEASE, pdev->pdev_id);
  1163. }
  1164. dp_rx_pdev_detach(pdev);
  1165. dp_rx_pdev_mon_detach(pdev);
  1166. dp_neighbour_peers_detach(pdev);
  1167. /* Setup per PDEV REO rings if configured */
  1168. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1169. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  1170. REO_DST, pdev->pdev_id);
  1171. }
  1172. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  1173. dp_rxdma_ring_cleanup(soc, pdev);
  1174. dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
  1175. dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
  1176. dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
  1177. RXDMA_MONITOR_STATUS, 0);
  1178. dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
  1179. RXDMA_MONITOR_DESC, 0);
  1180. soc->pdev_list[pdev->pdev_id] = NULL;
  1181. soc->pdev_count--;
  1182. qdf_mem_free(pdev);
  1183. }
  1184. /*
  1185. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  1186. * @soc: DP SOC handle
  1187. */
  1188. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  1189. {
  1190. struct reo_desc_list_node *desc;
  1191. struct dp_rx_tid *rx_tid;
  1192. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  1193. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  1194. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  1195. rx_tid = &desc->rx_tid;
  1196. qdf_mem_unmap_nbytes_single(soc->osdev,
  1197. rx_tid->hw_qdesc_paddr,
  1198. QDF_DMA_BIDIRECTIONAL,
  1199. rx_tid->hw_qdesc_alloc_size);
  1200. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1201. qdf_mem_free(desc);
  1202. }
  1203. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  1204. qdf_list_destroy(&soc->reo_desc_freelist);
  1205. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  1206. }
  1207. /*
  1208. * dp_soc_detach_wifi3() - Detach txrx SOC
  1209. * @txrx_soc: DP SOC handle
  1210. *
  1211. */
  1212. static void dp_soc_detach_wifi3(void *txrx_soc)
  1213. {
  1214. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1215. int i;
  1216. qdf_atomic_set(&soc->cmn_init_done, 0);
  1217. dp_soc_interrupt_detach(soc);
  1218. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1219. if (soc->pdev_list[i])
  1220. dp_pdev_detach_wifi3(
  1221. (struct cdp_pdev *)soc->pdev_list[i], 1);
  1222. }
  1223. dp_peer_find_detach(soc);
  1224. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  1225. * SW descriptors
  1226. */
  1227. /* Free the ring memories */
  1228. /* Common rings */
  1229. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  1230. /* Tx data rings */
  1231. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1232. dp_tx_soc_detach(soc);
  1233. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  1234. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  1235. TCL_DATA, i);
  1236. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  1237. WBM2SW_RELEASE, i);
  1238. }
  1239. }
  1240. /* TCL command and status rings */
  1241. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  1242. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  1243. /* Rx data rings */
  1244. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1245. soc->num_reo_dest_rings =
  1246. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  1247. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  1248. /* TODO: Get number of rings and ring sizes
  1249. * from wlan_cfg
  1250. */
  1251. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  1252. REO_DST, i);
  1253. }
  1254. }
  1255. /* REO reinjection ring */
  1256. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  1257. /* Rx release ring */
  1258. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  1259. /* Rx exception ring */
  1260. /* TODO: Better to store ring_type and ring_num in
  1261. * dp_srng during setup
  1262. */
  1263. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  1264. /* REO command and status rings */
  1265. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  1266. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  1267. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  1268. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  1269. htt_soc_detach(soc->htt_handle);
  1270. dp_reo_desc_freelist_destroy(soc);
  1271. qdf_mem_free(soc);
  1272. }
  1273. /*
  1274. * dp_rxdma_ring_config() - configure the RX DMA rings
  1275. *
  1276. * This function is used to configure the MAC rings.
1277. * On MCL, the host provides buffers in the Host2FW ring;
1278. * the FW refills (copies) buffers to the ring and updates
1279. * ring_idx in a register
  1280. *
  1281. * @soc: data path SoC handle
  1282. * @pdev: Physical device handle
  1283. *
  1284. * Return: void
  1285. */
  1286. #ifdef QCA_HOST2FW_RXBUF_RING
  1287. static void dp_rxdma_ring_config(struct dp_soc *soc)
  1288. {
  1289. int i;
  1290. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1291. struct dp_pdev *pdev = soc->pdev_list[i];
  1292. if (pdev) {
  1293. int mac_id = 0;
  1294. int j;
  1295. bool dbs_enable = 0;
  1296. int max_mac_rings =
  1297. wlan_cfg_get_num_mac_rings
  1298. (pdev->wlan_cfg_ctx);
  1299. htt_srng_setup(soc->htt_handle, 0,
  1300. pdev->rx_refill_buf_ring.hal_srng,
  1301. RXDMA_BUF);
  1302. if (soc->cdp_soc.ol_ops->
  1303. is_hw_dbs_2x2_capable) {
  1304. dbs_enable = soc->cdp_soc.ol_ops->
  1305. is_hw_dbs_2x2_capable(soc->psoc);
  1306. }
  1307. if (dbs_enable) {
  1308. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1309. QDF_TRACE_LEVEL_ERROR,
  1310. FL("DBS enabled max_mac_rings %d\n"),
  1311. max_mac_rings);
  1312. } else {
  1313. max_mac_rings = 1;
  1314. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1315. QDF_TRACE_LEVEL_ERROR,
  1316. FL("DBS disabled, max_mac_rings %d\n"),
  1317. max_mac_rings);
  1318. }
  1319. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1320. FL("pdev_id %d max_mac_rings %d\n"),
  1321. pdev->pdev_id, max_mac_rings);
  1322. for (j = 0; j < max_mac_rings; j++) {
  1323. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1324. QDF_TRACE_LEVEL_ERROR,
  1325. FL("mac_id %d\n"), mac_id);
  1326. htt_srng_setup(soc->htt_handle, mac_id,
  1327. pdev->rx_mac_buf_ring[j]
  1328. .hal_srng,
  1329. RXDMA_BUF);
  1330. mac_id++;
  1331. }
  1332. }
  1333. }
  1334. }
  1335. #else
  1336. static void dp_rxdma_ring_config(struct dp_soc *soc)
  1337. {
  1338. int i;
  1339. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1340. struct dp_pdev *pdev = soc->pdev_list[i];
  1341. if (pdev) {
  1342. htt_srng_setup(soc->htt_handle, i,
  1343. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  1344. htt_srng_setup(soc->htt_handle, i,
  1345. pdev->rxdma_mon_buf_ring.hal_srng,
  1346. RXDMA_MONITOR_BUF);
  1347. htt_srng_setup(soc->htt_handle, i,
  1348. pdev->rxdma_mon_dst_ring.hal_srng,
  1349. RXDMA_MONITOR_DST);
  1350. htt_srng_setup(soc->htt_handle, i,
  1351. pdev->rxdma_mon_status_ring.hal_srng,
  1352. RXDMA_MONITOR_STATUS);
  1353. htt_srng_setup(soc->htt_handle, i,
  1354. pdev->rxdma_mon_desc_ring.hal_srng,
  1355. RXDMA_MONITOR_DESC);
  1356. }
  1357. }
  1358. }
  1359. #endif
  1360. /*
  1361. * dp_soc_attach_target_wifi3() - SOC initialization in the target
1362. * @cdp_soc: Datapath SOC handle
  1363. */
  1364. static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  1365. {
  1366. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  1367. htt_soc_attach_target(soc->htt_handle);
  1368. dp_rxdma_ring_config(soc);
  1369. DP_STATS_INIT(soc);
  1370. return 0;
  1371. }
  1372. /*
  1373. * dp_vdev_attach_wifi3() - attach txrx vdev
  1374. * @txrx_pdev: Datapath PDEV handle
  1375. * @vdev_mac_addr: MAC address of the virtual interface
  1376. * @vdev_id: VDEV Id
  1377. * @wlan_op_mode: VDEV operating mode
  1378. *
  1379. * Return: DP VDEV handle on success, NULL on failure
  1380. */
  1381. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  1382. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  1383. {
  1384. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  1385. struct dp_soc *soc = pdev->soc;
  1386. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  1387. if (!vdev) {
  1388. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1389. FL("DP VDEV memory allocation failed"));
  1390. goto fail0;
  1391. }
  1392. vdev->pdev = pdev;
  1393. vdev->vdev_id = vdev_id;
  1394. vdev->opmode = op_mode;
  1395. vdev->osdev = soc->osdev;
  1396. vdev->osif_rx = NULL;
  1397. vdev->osif_rsim_rx_decap = NULL;
  1398. vdev->osif_rx_mon = NULL;
  1399. vdev->osif_tx_free_ext = NULL;
  1400. vdev->osif_vdev = NULL;
  1401. vdev->delete.pending = 0;
  1402. vdev->safemode = 0;
  1403. vdev->drop_unenc = 1;
  1404. #ifdef notyet
  1405. vdev->filters_num = 0;
  1406. #endif
  1407. qdf_mem_copy(
  1408. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  1409. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  1410. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  1411. vdev->dscp_tid_map_id = 0;
  1412. vdev->mcast_enhancement_en = 0;
  1413. /* TODO: Initialize default HTT meta data that will be used in
  1414. * TCL descriptors for packets transmitted from this VDEV
  1415. */
  1416. TAILQ_INIT(&vdev->peer_list);
  1417. /* add this vdev into the pdev's list */
  1418. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  1419. pdev->vdev_count++;
  1420. dp_tx_vdev_attach(vdev);
  1421. #ifdef DP_INTR_POLL_BASED
  1422. if (pdev->vdev_count == 1)
  1423. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1424. #endif
  1425. dp_lro_hash_setup(soc);
  1426. /* LRO */
  1427. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  1428. wlan_op_mode_sta == vdev->opmode)
  1429. vdev->lro_enable = true;
  1430. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1431. "LRO: vdev_id %d lro_enable %d", vdev_id, vdev->lro_enable);
  1432. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1433. "Created vdev %p (%pM)", vdev, vdev->mac_addr.raw);
  1434. DP_STATS_INIT(vdev);
  1435. return (struct cdp_vdev *)vdev;
  1436. fail0:
  1437. return NULL;
  1438. }
  1439. /**
  1440. * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
1441. * @vdev_handle: Datapath VDEV handle
  1442. * @osif_vdev: OSIF vdev handle
  1443. * @txrx_ops: Tx and Rx operations
  1444. *
1445. * Return: void
  1446. */
  1447. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  1448. void *osif_vdev,
  1449. struct ol_txrx_ops *txrx_ops)
  1450. {
  1451. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1452. vdev->osif_vdev = osif_vdev;
  1453. vdev->osif_rx = txrx_ops->rx.rx;
  1454. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  1455. vdev->osif_rx_mon = txrx_ops->rx.mon;
  1456. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  1457. #ifdef notyet
  1458. #if ATH_SUPPORT_WAPI
  1459. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  1460. #endif
  1461. #if UMAC_SUPPORT_PROXY_ARP
  1462. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  1463. #endif
  1464. #endif
  1465. vdev->me_convert = txrx_ops->me_convert;
  1466. /* TODO: Enable the following once Tx code is integrated */
  1467. txrx_ops->tx.tx = dp_tx_send;
  1468. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1469. "DP Vdev Register success");
  1470. }
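/*
 * Illustrative sketch (not part of the driver): the control-path sequence
 * that pairs dp_vdev_attach_wifi3() with dp_vdev_register_wifi3(). The
 * wrapper name, the OSIF context and the caller-owned ops table below are
 * hypothetical placeholders, shown only to make the call order clear; it is
 * compiled out via the file's "notyet" convention.
 */
#ifdef notyet
static struct cdp_vdev *dp_example_vdev_bringup(struct cdp_pdev *txrx_pdev,
	uint8_t *mac, void *example_osif_ctx,
	struct ol_txrx_ops *example_ops)
{
	struct cdp_vdev *vdev;

	/* allocate and initialize the DP vdev for a STA interface */
	vdev = dp_vdev_attach_wifi3(txrx_pdev, mac, 0, wlan_op_mode_sta);
	if (!vdev)
		return NULL;

	/* hand the OSIF context and callbacks to the datapath; this also
	 * points example_ops->tx.tx at dp_tx_send for the transmit path
	 */
	dp_vdev_register_wifi3(vdev, example_osif_ctx, example_ops);

	return vdev;
}
#endif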
  1471. /*
  1472. * dp_vdev_detach_wifi3() - Detach txrx vdev
  1473. * @txrx_vdev: Datapath VDEV handle
  1474. * @callback: Callback OL_IF on completion of detach
  1475. * @cb_context: Callback context
  1476. *
  1477. */
  1478. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  1479. ol_txrx_vdev_delete_cb callback, void *cb_context)
  1480. {
  1481. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1482. struct dp_pdev *pdev = vdev->pdev;
  1483. struct dp_soc *soc = pdev->soc;
  1484. /* preconditions */
  1485. qdf_assert(vdev);
  1486. /* remove the vdev from its parent pdev's list */
  1487. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  1488. /*
  1489. * Use peer_ref_mutex while accessing peer_list, in case
  1490. * a peer is in the process of being removed from the list.
  1491. */
  1492. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1493. /* check that the vdev has no peers allocated */
  1494. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  1495. /* debug print - will be removed later */
  1496. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  1497. FL("not deleting vdev object %p (%pM)"
  1498. "until deletion finishes for all its peers"),
  1499. vdev, vdev->mac_addr.raw);
  1500. /* indicate that the vdev needs to be deleted */
  1501. vdev->delete.pending = 1;
  1502. vdev->delete.callback = callback;
  1503. vdev->delete.context = cb_context;
  1504. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1505. return;
  1506. }
  1507. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1508. dp_tx_vdev_detach(vdev);
  1509. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1510. FL("deleting vdev object %p (%pM)"), vdev, vdev->mac_addr.raw);
  1511. qdf_mem_free(vdev);
  1512. if (callback)
  1513. callback(cb_context);
  1514. }
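/*
 * Illustrative sketch (not part of the driver): requesting vdev deletion
 * with a completion callback. If peers are still attached, the datapath only
 * marks delete.pending and invokes the callback later, from
 * dp_peer_unref_delete(), once the last peer reference is dropped. The
 * callback and wrapper names are hypothetical, and the callback prototype is
 * assumed to match the void (*)(void *) usage above.
 */
#ifdef notyet
static void dp_example_vdev_delete_done(void *ctx)
{
	/* caller-side bookkeeping once the vdev is actually freed */
}

static void dp_example_vdev_teardown(struct cdp_vdev *vdev_handle,
	void *example_ctx)
{
	dp_vdev_detach_wifi3(vdev_handle, dp_example_vdev_delete_done,
		example_ctx);
}
#endif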
  1515. /*
  1516. * dp_peer_create_wifi3() - attach txrx peer
  1517. * @txrx_vdev: Datapath VDEV handle
  1518. * @peer_mac_addr: Peer MAC address
  1519. *
1520. * Return: DP peer handle on success, NULL on failure
  1521. */
  1522. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  1523. uint8_t *peer_mac_addr)
  1524. {
  1525. struct dp_peer *peer;
  1526. int i;
  1527. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1528. struct dp_pdev *pdev;
  1529. struct dp_soc *soc;
  1530. /* preconditions */
  1531. qdf_assert(vdev);
  1532. qdf_assert(peer_mac_addr);
  1533. pdev = vdev->pdev;
  1534. soc = pdev->soc;
  1535. #ifdef notyet
  1536. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  1537. soc->mempool_ol_ath_peer);
  1538. #else
  1539. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  1540. #endif
  1541. if (!peer)
  1542. return NULL; /* failure */
  1543. qdf_mem_zero(peer, sizeof(struct dp_peer));
  1544. TAILQ_INIT(&peer->ast_entry_list);
  1545. qdf_mem_copy(&peer->self_ast_entry.mac_addr, peer_mac_addr,
  1546. DP_MAC_ADDR_LEN);
  1547. peer->self_ast_entry.peer = peer;
  1548. TAILQ_INSERT_TAIL(&peer->ast_entry_list, &peer->self_ast_entry,
  1549. ast_entry_elem);
  1550. qdf_spinlock_create(&peer->peer_info_lock);
  1551. /* store provided params */
  1552. peer->vdev = vdev;
  1553. qdf_mem_copy(
  1554. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
1555. /* TODO: See if rx_opt_proc is really required */
  1556. peer->rx_opt_proc = soc->rx_opt_proc;
  1557. /* initialize the peer_id */
  1558. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  1559. peer->peer_ids[i] = HTT_INVALID_PEER;
  1560. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1561. qdf_atomic_init(&peer->ref_cnt);
  1562. /* keep one reference for attach */
  1563. qdf_atomic_inc(&peer->ref_cnt);
  1564. /* add this peer into the vdev's list */
  1565. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  1566. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1567. /* TODO: See if hash based search is required */
  1568. dp_peer_find_hash_add(soc, peer);
  1569. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1570. "vdev %p created peer %p (%pM) ref_cnt: %d",
  1571. vdev, peer, peer->mac_addr.raw,
  1572. qdf_atomic_read(&peer->ref_cnt));
  1573. /*
1574. * For every peer MAP message, search and set if bss_peer
  1575. */
  1576. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
  1577. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1578. "vdev bss_peer!!!!");
  1579. peer->bss_peer = 1;
  1580. vdev->vap_bss_peer = peer;
  1581. }
  1582. #ifndef CONFIG_WIN
  1583. dp_local_peer_id_alloc(pdev, peer);
  1584. #endif
  1585. DP_STATS_INIT(peer);
  1586. return (void *)peer;
  1587. }
  1588. /*
  1589. * dp_peer_setup_wifi3() - initialize the peer
  1590. * @vdev_hdl: virtual device object
1591. * @peer_hdl: Peer object
  1592. *
  1593. * Return: void
  1594. */
  1595. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  1596. {
  1597. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  1598. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  1599. struct dp_pdev *pdev;
  1600. struct dp_soc *soc;
  1601. bool hash_based = 0;
  1602. enum cdp_host_reo_dest_ring reo_dest;
  1603. /* preconditions */
  1604. qdf_assert(vdev);
  1605. qdf_assert(peer);
  1606. pdev = vdev->pdev;
  1607. soc = pdev->soc;
  1608. dp_peer_rx_init(pdev, peer);
  1609. peer->last_assoc_rcvd = 0;
  1610. peer->last_disassoc_rcvd = 0;
  1611. peer->last_deauth_rcvd = 0;
  1612. hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  1613. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1614. FL("hash based steering %d\n"), hash_based);
  1615. if (!hash_based)
  1616. reo_dest = pdev->reo_dest;
  1617. else
  1618. reo_dest = 1;
  1619. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  1620. /* TODO: Check the destination ring number to be passed to FW */
  1621. soc->cdp_soc.ol_ops->peer_set_default_routing(
  1622. pdev->osif_pdev, peer->mac_addr.raw,
  1623. peer->vdev->vdev_id, hash_based, reo_dest);
  1624. }
  1625. return;
  1626. }
  1627. /*
  1628. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  1629. * @vdev_handle: virtual device object
  1630. * @htt_pkt_type: type of pkt
  1631. *
  1632. * Return: void
  1633. */
  1634. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  1635. enum htt_cmn_pkt_type val)
  1636. {
  1637. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1638. vdev->tx_encap_type = val;
  1639. }
  1640. /*
  1641. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  1642. * @vdev_handle: virtual device object
  1643. * @htt_pkt_type: type of pkt
  1644. *
  1645. * Return: void
  1646. */
  1647. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  1648. enum htt_cmn_pkt_type val)
  1649. {
  1650. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1651. vdev->rx_decap_type = val;
  1652. }
  1653. /*
  1654. * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
  1655. * @pdev_handle: physical device object
  1656. * @val: reo destination ring index (1 - 4)
  1657. *
  1658. * Return: void
  1659. */
  1660. static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
  1661. enum cdp_host_reo_dest_ring val)
  1662. {
  1663. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  1664. if (pdev)
  1665. pdev->reo_dest = val;
  1666. }
  1667. /*
  1668. * dp_get_pdev_reo_dest() - get the reo destination for this pdev
  1669. * @pdev_handle: physical device object
  1670. *
  1671. * Return: reo destination ring index
  1672. */
  1673. static enum cdp_host_reo_dest_ring
  1674. dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
  1675. {
  1676. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  1677. if (pdev)
  1678. return pdev->reo_dest;
  1679. else
  1680. return cdp_host_reo_dest_ring_unknown;
  1681. }
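/*
 * Illustrative sketch (not part of the driver): selecting a REO destination
 * ring for a pdev. When RX hash-based steering is disabled, the value set
 * here is what dp_peer_setup_wifi3() passes to the FW as the default routing
 * for newly created peers. The wrapper below is hypothetical and compiled
 * out.
 */
#ifdef notyet
static void dp_example_select_reo_ring(struct cdp_pdev *pdev_handle,
	enum cdp_host_reo_dest_ring ring)
{
	dp_set_pdev_reo_dest(pdev_handle, ring);

	/* read back the currently configured destination ring */
	if (dp_get_pdev_reo_dest(pdev_handle) != ring)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("reo destination not applied"));
}
#endif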
  1682. /*
  1683. * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
  1684. * @pdev_handle: device object
  1685. * @val: value to be set
  1686. *
1687. * Return: 0 on success
  1688. */
  1689. static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  1690. uint32_t val)
  1691. {
  1692. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  1693. /* Enable/Disable smart mesh filtering. This flag will be checked
  1694. * during rx processing to check if packets are from NAC clients.
  1695. */
  1696. pdev->filter_neighbour_peers = val;
  1697. return 0;
  1698. }
  1699. /*
  1700. * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
  1701. * address for smart mesh filtering
  1702. * @pdev_handle: device object
  1703. * @cmd: Add/Del command
  1704. * @macaddr: nac client mac address
  1705. *
1706. * Return: 1 on success, 0 on failure
  1707. */
  1708. static int dp_update_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
  1709. uint32_t cmd, uint8_t *macaddr)
  1710. {
  1711. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  1712. struct dp_neighbour_peer *peer = NULL;
  1713. if (!macaddr)
  1714. goto fail0;
  1715. /* Store address of NAC (neighbour peer) which will be checked
  1716. * against TA of received packets.
  1717. */
  1718. if (cmd == DP_NAC_PARAM_ADD) {
  1719. peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
  1720. sizeof(*peer));
  1721. if (!peer) {
  1722. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1723. FL("DP neighbour peer node memory allocation failed"));
  1724. goto fail0;
  1725. }
  1726. qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
  1727. macaddr, DP_MAC_ADDR_LEN);
  1728. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  1729. /* add this neighbour peer into the list */
  1730. TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
  1731. neighbour_peer_list_elem);
  1732. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  1733. return 1;
  1734. } else if (cmd == DP_NAC_PARAM_DEL) {
  1735. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  1736. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  1737. neighbour_peer_list_elem) {
  1738. if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  1739. macaddr, DP_MAC_ADDR_LEN)) {
  1740. /* delete this peer from the list */
  1741. TAILQ_REMOVE(&pdev->neighbour_peers_list,
  1742. peer, neighbour_peer_list_elem);
  1743. qdf_mem_free(peer);
  1744. break;
  1745. }
  1746. }
  1747. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  1748. return 1;
  1749. }
  1750. fail0:
  1751. return 0;
  1752. }
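/*
 * Illustrative sketch (not part of the driver): how a control-path caller
 * might use the two helpers above to track and later drop a NAC client for
 * smart mesh filtering. The wrapper name is hypothetical and the caller is
 * assumed to own the MAC buffer; compiled out via "notyet".
 */
#ifdef notyet
static void dp_example_nac_toggle(struct cdp_pdev *pdev_handle,
	uint8_t *nac_mac)
{
	/* enable TA-based filtering, then track the NAC client */
	dp_set_filter_neighbour_peers(pdev_handle, 1);
	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_ADD,
		nac_mac);

	/* ... monitor traffic from the NAC client ... */

	/* stop tracking and disable filtering */
	dp_update_filter_neighbour_peers(pdev_handle, DP_NAC_PARAM_DEL,
		nac_mac);
	dp_set_filter_neighbour_peers(pdev_handle, 0);
}
#endif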
  1753. /*
  1754. * dp_peer_authorize() - authorize txrx peer
  1755. * @peer_handle: Datapath peer handle
1756. * @authorize: set (1) or clear (0) the authorized flag
  1757. *
  1758. */
  1759. static void dp_peer_authorize(void *peer_handle, uint32_t authorize)
  1760. {
  1761. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1762. struct dp_soc *soc;
  1763. if (peer != NULL) {
  1764. soc = peer->vdev->pdev->soc;
  1765. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1766. peer->authorize = authorize ? 1 : 0;
  1767. #ifdef notyet /* ATH_BAND_STEERING */
  1768. peer->peer_bs_inact_flag = 0;
  1769. peer->peer_bs_inact = soc->pdev_bs_inact_reload;
  1770. #endif
  1771. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1772. }
  1773. }
  1774. /*
  1775. * dp_peer_unref_delete() - unref and delete peer
  1776. * @peer_handle: Datapath peer handle
  1777. *
  1778. */
  1779. void dp_peer_unref_delete(void *peer_handle)
  1780. {
  1781. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1782. struct dp_vdev *vdev = peer->vdev;
  1783. struct dp_pdev *pdev = vdev->pdev;
  1784. struct dp_soc *soc = pdev->soc;
  1785. struct dp_peer *tmppeer;
  1786. int found = 0;
  1787. uint16_t peer_id;
  1788. uint16_t hw_peer_id;
  1789. struct dp_ast_entry *ast_entry;
  1790. /*
  1791. * Hold the lock all the way from checking if the peer ref count
  1792. * is zero until the peer references are removed from the hash
  1793. * table and vdev list (if the peer ref count is zero).
  1794. * This protects against a new HL tx operation starting to use the
  1795. * peer object just after this function concludes it's done being used.
  1796. * Furthermore, the lock needs to be held while checking whether the
  1797. * vdev's list of peers is empty, to make sure that list is not modified
  1798. * concurrently with the empty check.
  1799. */
  1800. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1801. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1802. "%s: peer %p ref_cnt(before decrement): %d\n", __func__,
  1803. peer, qdf_atomic_read(&peer->ref_cnt));
  1804. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  1805. peer_id = peer->peer_ids[0];
  1806. /*
  1807. * Make sure that the reference to the peer in
  1808. * peer object map is removed
  1809. */
  1810. if (peer_id != HTT_INVALID_PEER)
  1811. soc->peer_id_to_obj_map[peer_id] = NULL;
  1812. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1813. "Deleting peer %p (%pM)", peer, peer->mac_addr.raw);
  1814. /* remove the reference to the peer from the hash table */
  1815. dp_peer_find_hash_remove(soc, peer);
  1816. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  1817. if (tmppeer == peer) {
  1818. found = 1;
  1819. break;
  1820. }
  1821. }
  1822. if (found) {
  1823. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  1824. peer_list_elem);
  1825. } else {
  1826. /*Ignoring the remove operation as peer not found*/
  1827. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  1828. "peer %p not found in vdev (%p)->peer_list:%p",
  1829. peer, vdev, &peer->vdev->peer_list);
  1830. }
  1831. /* cleanup the peer data */
  1832. dp_peer_cleanup(vdev, peer);
  1833. /* check whether the parent vdev has no peers left */
  1834. if (TAILQ_EMPTY(&vdev->peer_list)) {
  1835. /*
  1836. * Now that there are no references to the peer, we can
  1837. * release the peer reference lock.
  1838. */
  1839. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1840. /*
  1841. * Check if the parent vdev was waiting for its peers
  1842. * to be deleted, in order for it to be deleted too.
  1843. */
  1844. if (vdev->delete.pending) {
  1845. ol_txrx_vdev_delete_cb vdev_delete_cb =
  1846. vdev->delete.callback;
  1847. void *vdev_delete_context =
  1848. vdev->delete.context;
  1849. QDF_TRACE(QDF_MODULE_ID_DP,
  1850. QDF_TRACE_LEVEL_INFO_HIGH,
  1851. FL("deleting vdev object %p (%pM)"
  1852. " - its last peer is done"),
  1853. vdev, vdev->mac_addr.raw);
  1854. /* all peers are gone, go ahead and delete it */
  1855. qdf_mem_free(vdev);
  1856. if (vdev_delete_cb)
  1857. vdev_delete_cb(vdev_delete_context);
  1858. }
  1859. } else {
  1860. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1861. }
  1862. #ifdef notyet
  1863. qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
  1864. #else
  1865. TAILQ_FOREACH(ast_entry, &peer->ast_entry_list,
  1866. ast_entry_elem) {
  1867. hw_peer_id = ast_entry->ast_idx;
  1868. if (peer->self_ast_entry.ast_idx != hw_peer_id)
  1869. qdf_mem_free(ast_entry);
  1870. else
  1871. peer->self_ast_entry.ast_idx =
  1872. HTT_INVALID_PEER;
  1873. soc->ast_table[hw_peer_id] = NULL;
  1874. }
  1875. qdf_mem_free(peer);
  1876. #endif
  1877. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  1878. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
  1879. vdev->vdev_id, peer->mac_addr.raw);
  1880. }
  1881. } else {
  1882. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1883. }
  1884. }
  1885. /*
1886. * dp_peer_delete_wifi3() - Detach txrx peer
  1887. * @peer_handle: Datapath peer handle
  1888. *
  1889. */
  1890. static void dp_peer_delete_wifi3(void *peer_handle)
  1891. {
  1892. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1893. /* redirect the peer's rx delivery function to point to a
  1894. * discard func
  1895. */
  1896. peer->rx_opt_proc = dp_rx_discard;
  1897. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1898. FL("peer %p (%pM)"), peer, peer->mac_addr.raw);
  1899. #ifndef CONFIG_WIN
  1900. dp_local_peer_id_free(peer->vdev->pdev, peer);
  1901. #endif
  1902. qdf_spinlock_destroy(&peer->peer_info_lock);
  1903. /*
  1904. * Remove the reference added during peer_attach.
  1905. * The peer will still be left allocated until the
  1906. * PEER_UNMAP message arrives to remove the other
  1907. * reference, added by the PEER_MAP message.
  1908. */
  1909. dp_peer_unref_delete(peer_handle);
  1910. }
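/*
 * Illustrative sketch (not part of the driver): the reference-counted peer
 * lifecycle as driven by the control path. dp_peer_create_wifi3() takes the
 * "attach" reference, dp_peer_delete_wifi3() drops it, and the peer memory
 * is only freed once the PEER_UNMAP reference is also released through
 * dp_peer_unref_delete(). The wrapper name is hypothetical; compiled out.
 */
#ifdef notyet
static void dp_example_peer_lifecycle(struct cdp_vdev *vdev_handle,
	uint8_t *peer_mac)
{
	void *peer;

	peer = dp_peer_create_wifi3(vdev_handle, peer_mac);
	if (!peer)
		return;

	/* program RX TID state and default REO routing for this peer */
	dp_peer_setup_wifi3(vdev_handle, peer);

	/* mark the peer authorized once key exchange completes */
	dp_peer_authorize(peer, 1);

	/* drop the attach reference; final free happens on PEER_UNMAP */
	dp_peer_delete_wifi3(peer);
}
#endif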
  1911. /*
1912. * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of the vdev
1913. * @pvdev: Datapath VDEV handle
  1914. *
  1915. */
  1916. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  1917. {
  1918. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  1919. return vdev->mac_addr.raw;
  1920. }
  1921. /*
1922. * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
1923. * @vdev_handle: DP VDEV handle
1924. * @val: WDS enable/disable value
1925. *
1926. * Return: 0 on success
  1927. */
  1928. static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
  1929. {
  1930. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1931. vdev->wds_enabled = val;
  1932. return 0;
  1933. }
  1934. /*
1935. * dp_get_vdev_from_vdev_id_wifi3() - Get vdev handle from vdev_id
1936. * @dev: Datapath PDEV handle
1937. * @vdev_id: VDEV Id
  1938. */
  1939. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  1940. uint8_t vdev_id)
  1941. {
  1942. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  1943. struct dp_vdev *vdev = NULL;
  1944. if (qdf_unlikely(!pdev))
  1945. return NULL;
  1946. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1947. if (vdev->vdev_id == vdev_id)
  1948. break;
  1949. }
  1950. return (struct cdp_vdev *)vdev;
  1951. }
  1952. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  1953. {
  1954. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1955. return vdev->opmode;
  1956. }
  1957. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  1958. {
  1959. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  1960. struct dp_pdev *pdev = vdev->pdev;
  1961. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  1962. }
  1963. /**
  1964. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  1965. * @vdev_handle: Datapath VDEV handle
  1966. * @smart_monitor: Flag to denote if its smart monitor mode
  1967. *
  1968. * Return: 0 on success, not 0 on failure
  1969. */
  1970. static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
  1971. uint8_t smart_monitor)
  1972. {
1973. /* Many monitor VAPs can exist in a system, but only one can be up at
1974. * any time
  1975. */
  1976. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1977. struct dp_pdev *pdev;
  1978. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  1979. struct dp_soc *soc;
  1980. uint8_t pdev_id;
  1981. qdf_assert(vdev);
  1982. pdev = vdev->pdev;
  1983. pdev_id = pdev->pdev_id;
  1984. soc = pdev->soc;
  1985. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  1986. "pdev=%p, pdev_id=%d, soc=%p vdev=%p\n",
  1987. pdev, pdev_id, soc, vdev);
  1988. /*Check if current pdev's monitor_vdev exists */
  1989. if (pdev->monitor_vdev) {
  1990. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1991. "vdev=%p\n", vdev);
  1992. qdf_assert(vdev);
  1993. }
  1994. pdev->monitor_vdev = vdev;
  1995. /* If smart monitor mode, do not configure monitor ring */
  1996. if (smart_monitor)
  1997. return QDF_STATUS_SUCCESS;
  1998. htt_tlv_filter.mpdu_start = 1;
  1999. htt_tlv_filter.msdu_start = 1;
  2000. htt_tlv_filter.packet = 1;
  2001. htt_tlv_filter.msdu_end = 1;
  2002. htt_tlv_filter.mpdu_end = 1;
  2003. htt_tlv_filter.packet_header = 1;
  2004. htt_tlv_filter.attention = 1;
  2005. htt_tlv_filter.ppdu_start = 0;
  2006. htt_tlv_filter.ppdu_end = 0;
  2007. htt_tlv_filter.ppdu_end_user_stats = 0;
  2008. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  2009. htt_tlv_filter.ppdu_end_status_done = 0;
  2010. htt_tlv_filter.enable_fp = 1;
  2011. htt_tlv_filter.enable_md = 0;
  2012. htt_tlv_filter.enable_mo = 1;
  2013. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  2014. pdev->rxdma_mon_dst_ring.hal_srng,
  2015. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
  2016. htt_tlv_filter.mpdu_start = 1;
  2017. htt_tlv_filter.msdu_start = 1;
  2018. htt_tlv_filter.packet = 0;
  2019. htt_tlv_filter.msdu_end = 1;
  2020. htt_tlv_filter.mpdu_end = 1;
  2021. htt_tlv_filter.packet_header = 1;
  2022. htt_tlv_filter.attention = 1;
  2023. htt_tlv_filter.ppdu_start = 1;
  2024. htt_tlv_filter.ppdu_end = 1;
  2025. htt_tlv_filter.ppdu_end_user_stats = 1;
  2026. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  2027. htt_tlv_filter.ppdu_end_status_done = 1;
  2028. htt_tlv_filter.enable_fp = 1;
  2029. htt_tlv_filter.enable_md = 1;
  2030. htt_tlv_filter.enable_mo = 1;
  2031. /*
  2032. * htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  2033. * pdev->rxdma_mon_status_ring.hal_srng,
  2034. * RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  2035. */
  2036. return QDF_STATUS_SUCCESS;
  2037. }
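/*
 * Illustrative sketch (not part of the driver): turning a vdev into the
 * pdev's monitor vdev. With smart_monitor set, only pdev->monitor_vdev is
 * recorded and the monitor destination ring is left unconfigured; with it
 * cleared, the full RX TLV filter above is programmed. The wrapper name is
 * hypothetical; compiled out.
 */
#ifdef notyet
static int dp_example_enable_monitor(struct cdp_vdev *vdev_handle,
	bool smart)
{
	return dp_vdev_set_monitor_mode(vdev_handle, smart ? 1 : 0);
}
#endif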
  2038. #ifdef MESH_MODE_SUPPORT
  2039. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  2040. {
  2041. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  2042. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2043. FL("val %d"), val);
  2044. vdev->mesh_vdev = val;
  2045. }
  2046. /*
2047. * dp_peer_set_mesh_rx_filter() - set the mesh rx filter
  2048. * @vdev_hdl: virtual device object
  2049. * @val: value to be set
  2050. *
  2051. * Return: void
  2052. */
  2053. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  2054. {
  2055. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  2056. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2057. FL("val %d"), val);
  2058. vdev->mesh_rx_filter = val;
  2059. }
  2060. #endif
  2061. /**
  2062. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
  2063. * @vdev: DP VDEV handle
  2064. *
  2065. * return: void
  2066. */
  2067. void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
  2068. {
  2069. struct dp_peer *peer = NULL;
  2070. int i;
  2071. qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
  2072. qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
  2073. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2074. if (!peer)
  2075. return;
  2076. for (i = 0; i <= MAX_MCS; i++) {
  2077. DP_STATS_AGGR(vdev, peer, tx.pkt_type[0].mcs_count[i]);
  2078. DP_STATS_AGGR(vdev, peer, tx.pkt_type[1].mcs_count[i]);
  2079. DP_STATS_AGGR(vdev, peer, tx.pkt_type[2].mcs_count[i]);
  2080. DP_STATS_AGGR(vdev, peer, tx.pkt_type[3].mcs_count[i]);
  2081. DP_STATS_AGGR(vdev, peer, tx.pkt_type[4].mcs_count[i]);
  2082. DP_STATS_AGGR(vdev, peer, rx.pkt_type[0].mcs_count[i]);
  2083. DP_STATS_AGGR(vdev, peer, rx.pkt_type[1].mcs_count[i]);
  2084. DP_STATS_AGGR(vdev, peer, rx.pkt_type[2].mcs_count[i]);
  2085. DP_STATS_AGGR(vdev, peer, rx.pkt_type[3].mcs_count[i]);
  2086. DP_STATS_AGGR(vdev, peer, rx.pkt_type[4].mcs_count[i]);
  2087. }
  2088. for (i = 0; i < SUPPORTED_BW; i++) {
  2089. DP_STATS_AGGR(vdev, peer, tx.bw[i]);
  2090. DP_STATS_AGGR(vdev, peer, rx.bw[i]);
  2091. }
  2092. for (i = 0; i < SS_COUNT; i++)
  2093. DP_STATS_AGGR(vdev, peer, rx.nss[i]);
  2094. for (i = 0; i < WME_AC_MAX; i++) {
  2095. DP_STATS_AGGR(vdev, peer, tx.wme_ac_type[i]);
  2096. DP_STATS_AGGR(vdev, peer, rx.wme_ac_type[i]);
  2097. DP_STATS_AGGR(vdev, peer, tx.excess_retries_ac[i]);
  2098. }
  2099. for (i = 0; i < MAX_MCS + 1; i++) {
  2100. DP_STATS_AGGR(vdev, peer, tx.sgi_count[i]);
  2101. DP_STATS_AGGR(vdev, peer, rx.sgi_count[i]);
  2102. }
  2103. DP_STATS_AGGR_PKT(vdev, peer, tx.comp_pkt);
  2104. DP_STATS_AGGR_PKT(vdev, peer, tx.ucast);
  2105. DP_STATS_AGGR_PKT(vdev, peer, tx.mcast);
  2106. DP_STATS_AGGR_PKT(vdev, peer, tx.tx_success);
  2107. DP_STATS_AGGR(vdev, peer, tx.tx_failed);
  2108. DP_STATS_AGGR(vdev, peer, tx.ofdma);
  2109. DP_STATS_AGGR(vdev, peer, tx.stbc);
  2110. DP_STATS_AGGR(vdev, peer, tx.ldpc);
  2111. DP_STATS_AGGR(vdev, peer, tx.retries);
  2112. DP_STATS_AGGR(vdev, peer, tx.non_amsdu_cnt);
  2113. DP_STATS_AGGR(vdev, peer, tx.amsdu_cnt);
  2114. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard);
  2115. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_retired);
  2116. DP_STATS_AGGR(vdev, peer, tx.dropped.mpdu_age_out);
  2117. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason1);
  2118. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason2);
  2119. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason3);
  2120. DP_STATS_AGGR(vdev, peer, rx.err.mic_err);
  2121. DP_STATS_AGGR(vdev, peer, rx.err.decrypt_err);
  2122. DP_STATS_AGGR(vdev, peer, rx.non_ampdu_cnt);
  2123. DP_STATS_AGGR(vdev, peer, rx.ampdu_cnt);
  2124. DP_STATS_AGGR(vdev, peer, rx.non_amsdu_cnt);
  2125. DP_STATS_AGGR(vdev, peer, rx.amsdu_cnt);
  2126. DP_STATS_AGGR_PKT(vdev, peer, rx.to_stack);
  2127. for (i = 0; i < CDP_MAX_RX_RINGS; i++)
  2128. DP_STATS_AGGR_PKT(vdev, peer, rx.rcvd_reo[i]);
  2129. peer->stats.rx.unicast.num = peer->stats.rx.to_stack.num -
  2130. peer->stats.rx.multicast.num;
  2131. peer->stats.rx.unicast.bytes = peer->stats.rx.to_stack.bytes -
  2132. peer->stats.rx.multicast.bytes;
  2133. DP_STATS_AGGR_PKT(vdev, peer, rx.unicast);
  2134. DP_STATS_AGGR_PKT(vdev, peer, rx.multicast);
  2135. DP_STATS_AGGR_PKT(vdev, peer, rx.wds);
  2136. DP_STATS_AGGR_PKT(vdev, peer, rx.raw);
  2137. DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.pkts);
  2138. DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss.fail);
  2139. vdev->stats.tx.last_ack_rssi =
  2140. peer->stats.tx.last_ack_rssi;
  2141. }
  2142. }
  2143. /**
  2144. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  2145. * @pdev: DP PDEV handle
  2146. *
  2147. * return: void
  2148. */
  2149. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  2150. {
  2151. struct dp_vdev *vdev = NULL;
  2152. uint8_t i;
  2153. qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
  2154. qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
  2155. qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
  2156. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  2157. if (!vdev)
  2158. return;
  2159. dp_aggregate_vdev_stats(vdev);
  2160. for (i = 0; i <= MAX_MCS; i++) {
  2161. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[0].mcs_count[i]);
  2162. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[1].mcs_count[i]);
  2163. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[2].mcs_count[i]);
  2164. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[3].mcs_count[i]);
  2165. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[4].mcs_count[i]);
  2166. DP_STATS_AGGR(pdev, vdev, rx.pkt_type[0].mcs_count[i]);
  2167. DP_STATS_AGGR(pdev, vdev, rx.pkt_type[1].mcs_count[i]);
  2168. DP_STATS_AGGR(pdev, vdev, rx.pkt_type[2].mcs_count[i]);
  2169. DP_STATS_AGGR(pdev, vdev, rx.pkt_type[3].mcs_count[i]);
  2170. DP_STATS_AGGR(pdev, vdev, rx.pkt_type[4].mcs_count[i]);
  2171. }
  2172. for (i = 0; i < SUPPORTED_BW; i++) {
  2173. DP_STATS_AGGR(pdev, vdev, tx.bw[i]);
  2174. DP_STATS_AGGR(pdev, vdev, rx.bw[i]);
  2175. }
  2176. for (i = 0; i < SS_COUNT; i++)
  2177. DP_STATS_AGGR(pdev, vdev, rx.nss[i]);
  2178. for (i = 0; i < WME_AC_MAX; i++) {
  2179. DP_STATS_AGGR(pdev, vdev, tx.wme_ac_type[i]);
  2180. DP_STATS_AGGR(pdev, vdev, rx.wme_ac_type[i]);
  2181. DP_STATS_AGGR(pdev, vdev,
  2182. tx.excess_retries_ac[i]);
  2183. }
  2184. for (i = 0; i < MAX_MCS + 1; i++) {
  2185. DP_STATS_AGGR(pdev, vdev, tx.sgi_count[i]);
  2186. DP_STATS_AGGR(pdev, vdev, rx.sgi_count[i]);
  2187. }
  2188. DP_STATS_AGGR_PKT(pdev, vdev, tx.comp_pkt);
  2189. DP_STATS_AGGR_PKT(pdev, vdev, tx.ucast);
  2190. DP_STATS_AGGR_PKT(pdev, vdev, tx.mcast);
  2191. DP_STATS_AGGR_PKT(pdev, vdev, tx.tx_success);
  2192. DP_STATS_AGGR(pdev, vdev, tx.tx_failed);
  2193. DP_STATS_AGGR(pdev, vdev, tx.ofdma);
  2194. DP_STATS_AGGR(pdev, vdev, tx.stbc);
  2195. DP_STATS_AGGR(pdev, vdev, tx.ldpc);
  2196. DP_STATS_AGGR(pdev, vdev, tx.retries);
  2197. DP_STATS_AGGR(pdev, vdev, tx.non_amsdu_cnt);
  2198. DP_STATS_AGGR(pdev, vdev, tx.amsdu_cnt);
  2199. DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_discard);
  2200. DP_STATS_AGGR(pdev, vdev,
  2201. tx.dropped.fw_discard_retired);
  2202. DP_STATS_AGGR(pdev, vdev, tx.dropped.mpdu_age_out);
  2203. DP_STATS_AGGR(pdev, vdev,
  2204. tx.dropped.fw_discard_reason1);
  2205. DP_STATS_AGGR(pdev, vdev,
  2206. tx.dropped.fw_discard_reason2);
  2207. DP_STATS_AGGR(pdev, vdev,
  2208. tx.dropped.fw_discard_reason3);
  2209. DP_STATS_AGGR(pdev, vdev, rx.err.mic_err);
  2210. DP_STATS_AGGR(pdev, vdev, rx.err.decrypt_err);
  2211. DP_STATS_AGGR(pdev, vdev, rx.non_ampdu_cnt);
  2212. DP_STATS_AGGR(pdev, vdev, rx.ampdu_cnt);
  2213. DP_STATS_AGGR(pdev, vdev, rx.non_amsdu_cnt);
  2214. DP_STATS_AGGR(pdev, vdev, rx.amsdu_cnt);
  2215. DP_STATS_AGGR_PKT(pdev, vdev, rx.to_stack);
  2216. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[0]);
  2217. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[1]);
  2218. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[2]);
  2219. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo[3]);
  2220. DP_STATS_AGGR_PKT(pdev, vdev, rx.unicast);
  2221. DP_STATS_AGGR_PKT(pdev, vdev, rx.multicast);
  2222. DP_STATS_AGGR_PKT(pdev, vdev, rx.wds);
  2223. DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.pkts);
  2224. DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss.fail);
  2225. DP_STATS_AGGR_PKT(pdev, vdev, rx.raw);
  2226. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
  2227. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
  2228. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
  2229. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
  2230. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw.raw_pkt);
  2231. DP_STATS_AGGR(pdev, vdev, tx_i.raw.dma_map_error);
  2232. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
  2233. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
  2234. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
  2235. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
  2236. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
  2237. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
  2238. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
  2239. DP_STATS_AGGR(pdev, vdev,
  2240. tx_i.mcast_en.dropped_map_error);
  2241. DP_STATS_AGGR(pdev, vdev,
  2242. tx_i.mcast_en.dropped_self_mac);
  2243. DP_STATS_AGGR(pdev, vdev,
  2244. tx_i.mcast_en.dropped_send_fail);
  2245. DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
  2246. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.dma_error);
  2247. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.ring_full);
  2248. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.enqueue_fail);
  2249. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.desc_na);
  2250. DP_STATS_AGGR(pdev, vdev, tx_i.dropped.res_full);
  2251. pdev->stats.tx_i.dropped.dropped_pkt.num =
  2252. pdev->stats.tx_i.dropped.dma_error +
  2253. pdev->stats.tx_i.dropped.ring_full +
  2254. pdev->stats.tx_i.dropped.enqueue_fail +
  2255. pdev->stats.tx_i.dropped.desc_na +
  2256. pdev->stats.tx_i.dropped.res_full;
  2257. pdev->stats.tx.last_ack_rssi =
  2258. vdev->stats.tx.last_ack_rssi;
  2259. pdev->stats.tx_i.tso.num_seg =
  2260. vdev->stats.tx_i.tso.num_seg;
  2261. }
  2262. }
  2263. /**
  2264. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  2265. * @pdev: DP_PDEV Handle
  2266. *
  2267. * Return:void
  2268. */
  2269. static inline void
  2270. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  2271. {
  2272. DP_TRACE_STATS(NONE, "WLAN Tx Stats:\n");
  2273. DP_TRACE_STATS(NONE, "Received From Stack:\n");
  2274. DP_TRACE_STATS(NONE, "Packets = %d",
  2275. pdev->stats.tx_i.rcvd.num);
  2276. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2277. pdev->stats.tx_i.rcvd.bytes);
  2278. DP_TRACE_STATS(NONE, "Processed:\n");
  2279. DP_TRACE_STATS(NONE, "Packets = %d",
  2280. pdev->stats.tx_i.processed.num);
  2281. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2282. pdev->stats.tx_i.processed.bytes);
  2283. DP_TRACE_STATS(NONE, "Completions:\n");
  2284. DP_TRACE_STATS(NONE, "Packets = %d",
  2285. pdev->stats.tx.comp_pkt.num);
  2286. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2287. pdev->stats.tx.comp_pkt.bytes);
  2288. DP_TRACE_STATS(NONE, "Dropped:\n");
  2289. DP_TRACE_STATS(NONE, "Packets = %d",
  2290. pdev->stats.tx_i.dropped.dropped_pkt.num);
  2291. DP_TRACE_STATS(NONE, "Dma_map_error = %d",
  2292. pdev->stats.tx_i.dropped.dma_error);
  2293. DP_TRACE_STATS(NONE, "Ring Full = %d",
  2294. pdev->stats.tx_i.dropped.ring_full);
  2295. DP_TRACE_STATS(NONE, "Descriptor Not available = %d",
  2296. pdev->stats.tx_i.dropped.desc_na);
  2297. DP_TRACE_STATS(NONE, "HW enqueue failed= %d",
  2298. pdev->stats.tx_i.dropped.enqueue_fail);
  2299. DP_TRACE_STATS(NONE, "Resources Full = %d",
  2300. pdev->stats.tx_i.dropped.res_full);
  2301. DP_TRACE_STATS(NONE, "Fw Discard = %d",
  2302. pdev->stats.tx.dropped.fw_discard);
  2303. DP_TRACE_STATS(NONE, "Fw Discard Retired = %d",
  2304. pdev->stats.tx.dropped.fw_discard_retired);
  2305. DP_TRACE_STATS(NONE, "Firmware Discard Untransmitted = %d",
  2306. pdev->stats.tx.dropped.fw_discard_untransmitted);
  2307. DP_TRACE_STATS(NONE, "Mpdu Age Out = %d",
  2308. pdev->stats.tx.dropped.mpdu_age_out);
  2309. DP_TRACE_STATS(NONE, "Firmware Discard Reason1 = %d",
  2310. pdev->stats.tx.dropped.fw_discard_reason1);
  2311. DP_TRACE_STATS(NONE, "Firmware Discard Reason2 = %d",
  2312. pdev->stats.tx.dropped.fw_discard_reason2);
  2313. DP_TRACE_STATS(NONE, "Firmware Discard Reason3 = %d\n",
  2314. pdev->stats.tx.dropped.fw_discard_reason3);
  2315. DP_TRACE_STATS(NONE, "Scatter Gather:\n");
  2316. DP_TRACE_STATS(NONE, "Packets = %d",
  2317. pdev->stats.tx_i.sg.sg_pkt.num);
  2318. DP_TRACE_STATS(NONE, "Bytes = %d",
  2319. pdev->stats.tx_i.sg.sg_pkt.bytes);
  2320. DP_TRACE_STATS(NONE, "Dropped By Host = %d",
  2321. pdev->stats.tx_i.sg.dropped_host);
  2322. DP_TRACE_STATS(NONE, "Dropped By Target = %d\n",
  2323. pdev->stats.tx_i.sg.dropped_target);
  2324. DP_TRACE_STATS(NONE, "Tso:\n");
  2325. DP_TRACE_STATS(NONE, "Number of Segments = %d",
  2326. pdev->stats.tx_i.tso.num_seg);
  2327. DP_TRACE_STATS(NONE, "Packets = %d",
  2328. pdev->stats.tx_i.tso.tso_pkt.num);
  2329. DP_TRACE_STATS(NONE, "Bytes = %d",
  2330. pdev->stats.tx_i.tso.tso_pkt.bytes);
  2331. DP_TRACE_STATS(NONE, "Dropped By Host = %d\n",
  2332. pdev->stats.tx_i.tso.dropped_host);
  2333. DP_TRACE_STATS(NONE, "Mcast Enhancement:\n");
  2334. DP_TRACE_STATS(NONE, "Packets = %d",
  2335. pdev->stats.tx_i.mcast_en.mcast_pkt.num);
  2336. DP_TRACE_STATS(NONE, "Bytes = %d",
  2337. pdev->stats.tx_i.mcast_en.mcast_pkt.bytes);
  2338. DP_TRACE_STATS(NONE, "Dropped: Map Errors = %d",
  2339. pdev->stats.tx_i.mcast_en.dropped_map_error);
  2340. DP_TRACE_STATS(NONE, "Dropped: Self Mac = %d",
  2341. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  2342. DP_TRACE_STATS(NONE, "Dropped: Send Fail = %d",
  2343. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  2344. DP_TRACE_STATS(NONE, "Unicast sent = %d\n",
  2345. pdev->stats.tx_i.mcast_en.ucast);
  2346. DP_TRACE_STATS(NONE, "Raw:\n");
  2347. DP_TRACE_STATS(NONE, "Packets = %d",
  2348. pdev->stats.tx_i.raw.raw_pkt.num);
  2349. DP_TRACE_STATS(NONE, "Bytes = %d",
  2350. pdev->stats.tx_i.raw.raw_pkt.bytes);
  2351. DP_TRACE_STATS(NONE, "DMA map error = %d\n",
  2352. pdev->stats.tx_i.raw.dma_map_error);
  2353. DP_TRACE_STATS(NONE, "Reinjected:\n");
  2354. DP_TRACE_STATS(NONE, "Packets = %d",
  2355. pdev->stats.tx_i.reinject_pkts.num);
  2356. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2357. pdev->stats.tx_i.reinject_pkts.bytes);
  2358. DP_TRACE_STATS(NONE, "Inspected:\n");
  2359. DP_TRACE_STATS(NONE, "Packets = %d",
  2360. pdev->stats.tx_i.inspect_pkts.num);
  2361. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2362. pdev->stats.tx_i.inspect_pkts.bytes);
  2363. }
  2364. /**
  2365. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  2366. * @pdev: DP_PDEV Handle
  2367. *
  2368. * Return: void
  2369. */
  2370. static inline void
  2371. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  2372. {
  2373. DP_TRACE_STATS(NONE, "WLAN Rx Stats:\n");
  2374. DP_TRACE_STATS(NONE, "Received From HW (Per Rx Ring):\n");
  2375. DP_TRACE_STATS(NONE, "Packets = %d %d %d %d",
  2376. pdev->stats.rx.rcvd_reo[0].num,
  2377. pdev->stats.rx.rcvd_reo[1].num,
  2378. pdev->stats.rx.rcvd_reo[2].num,
  2379. pdev->stats.rx.rcvd_reo[3].num);
  2380. DP_TRACE_STATS(NONE, "Bytes = %d %d %d %d\n",
  2381. pdev->stats.rx.rcvd_reo[0].bytes,
  2382. pdev->stats.rx.rcvd_reo[1].bytes,
  2383. pdev->stats.rx.rcvd_reo[2].bytes,
  2384. pdev->stats.rx.rcvd_reo[3].bytes);
  2385. DP_TRACE_STATS(NONE, "Replenished:\n");
  2386. DP_TRACE_STATS(NONE, "Packets = %d",
  2387. pdev->stats.replenish.pkts.num);
  2388. DP_TRACE_STATS(NONE, "Bytes = %d",
  2389. pdev->stats.replenish.pkts.bytes);
  2390. DP_TRACE_STATS(NONE, "Buffers Added To Freelist = %d\n",
  2391. pdev->stats.buf_freelist);
  2392. DP_TRACE_STATS(NONE, "Dropped:\n");
  2393. DP_TRACE_STATS(NONE, "Total Packets With Msdu Not Done = %d\n",
  2394. pdev->stats.dropped.msdu_not_done);
  2395. DP_TRACE_STATS(NONE, "Sent To Stack:\n");
  2396. DP_TRACE_STATS(NONE, "Packets = %d",
  2397. pdev->stats.rx.to_stack.num);
  2398. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2399. pdev->stats.rx.to_stack.bytes);
  2400. DP_TRACE_STATS(NONE, "Multicast/Broadcast:\n");
  2401. DP_TRACE_STATS(NONE, "Packets = %d",
  2402. pdev->stats.rx.multicast.num);
  2403. DP_TRACE_STATS(NONE, "Bytes = %d\n",
  2404. pdev->stats.rx.multicast.bytes);
  2405. DP_TRACE_STATS(NONE, "Errors:\n");
2406. DP_TRACE_STATS(NONE, "Rxdma Ring Un-initialized = %d",
  2407. pdev->stats.replenish.rxdma_err);
2408. DP_TRACE_STATS(NONE, "Desc Alloc Failed = %d",
  2409. pdev->stats.err.desc_alloc_fail);
  2410. }
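/*
 * Illustrative sketch (not part of the driver): the expected ordering when
 * dumping pdev-level statistics. dp_aggregate_pdev_stats() first folds the
 * per-peer and per-vdev counters upward, after which the print helpers
 * report consistent totals. The wrapper name is hypothetical; compiled out.
 */
#ifdef notyet
static void dp_example_dump_pdev_stats(struct dp_pdev *pdev)
{
	/* roll peer -> vdev -> pdev counters up before printing */
	dp_aggregate_pdev_stats(pdev);

	dp_print_pdev_tx_stats(pdev);
	dp_print_pdev_rx_stats(pdev);
}
#endif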
  2411. /**
2412. * dp_print_soc_tx_stats(): Print SOC level Tx stats
2413. * @soc: DP_SOC Handle
  2414. *
  2415. * Return: void
  2416. */
  2417. static inline void
  2418. dp_print_soc_tx_stats(struct dp_soc *soc)
  2419. {
  2420. DP_TRACE_STATS(NONE, "SOC Tx Stats:\n");
  2421. DP_TRACE_STATS(NONE, "Tx Descriptors In Use = %d",
  2422. soc->stats.tx.desc_in_use);
  2423. DP_TRACE_STATS(NONE, "Invalid peer:\n");
  2424. DP_TRACE_STATS(NONE, "Packets = %d",
  2425. soc->stats.tx.tx_invalid_peer.num);
  2426. DP_TRACE_STATS(NONE, "Bytes = %d",
  2427. soc->stats.tx.tx_invalid_peer.bytes);
  2428. DP_TRACE_STATS(NONE, "Packets dropped due to TCL ring full = %d %d %d",
  2429. soc->stats.tx.tcl_ring_full[0],
  2430. soc->stats.tx.tcl_ring_full[1],
  2431. soc->stats.tx.tcl_ring_full[2]);
  2432. }
  2433. /**
2434. * dp_print_soc_rx_stats(): Print SOC level Rx stats
  2435. * @soc: DP_SOC Handle
  2436. *
  2437. * Return:void
  2438. */
  2439. static inline void
  2440. dp_print_soc_rx_stats(struct dp_soc *soc)
  2441. {
  2442. uint32_t i;
  2443. char reo_error[DP_REO_ERR_LENGTH];
  2444. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  2445. uint8_t index = 0;
  2446. DP_TRACE_STATS(NONE, "SOC Rx Stats:\n");
  2447. DP_TRACE_STATS(NONE, "Errors:\n");
  2448. DP_TRACE_STATS(NONE, "Invalid RBM = %d",
  2449. soc->stats.rx.err.invalid_rbm);
  2450. DP_TRACE_STATS(NONE, "Invalid Vdev = %d",
  2451. soc->stats.rx.err.invalid_vdev);
  2452. DP_TRACE_STATS(NONE, "Invalid Pdev = %d",
  2453. soc->stats.rx.err.invalid_pdev);
  2454. DP_TRACE_STATS(NONE, "Invalid Peer = %d",
  2455. soc->stats.rx.err.rx_invalid_peer.num);
  2456. DP_TRACE_STATS(NONE, "HAL Ring Access Fail = %d",
  2457. soc->stats.rx.err.hal_ring_access_fail);
  2458. for (i = 0; i < MAX_RXDMA_ERRORS; i++) {
  2459. index += qdf_snprint(&rxdma_error[index],
  2460. DP_RXDMA_ERR_LENGTH - index,
  2461. " %d", soc->stats.rx.err.rxdma_error[i]);
  2462. }
  2463. DP_TRACE_STATS(NONE, "RXDMA Error (0-31):%s",
  2464. rxdma_error);
  2465. index = 0;
  2466. for (i = 0; i < REO_ERROR_TYPE_MAX; i++) {
  2467. index += qdf_snprint(&reo_error[index],
  2468. DP_REO_ERR_LENGTH - index,
  2469. " %d", soc->stats.rx.err.reo_error[i]);
  2470. }
  2471. DP_TRACE_STATS(NONE, "REO Error(0-14):%s",
  2472. reo_error);
  2473. }
  2474. /**
  2475. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  2476. * @vdev: DP_VDEV handle
  2477. *
  2478. * Return:void
  2479. */
  2480. static inline void
  2481. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  2482. {
  2483. struct dp_peer *peer = NULL;
  2484. DP_STATS_CLR(vdev->pdev);
  2485. DP_STATS_CLR(vdev->pdev->soc);
  2486. DP_STATS_CLR(vdev);
  2487. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2488. if (!peer)
  2489. return;
  2490. DP_STATS_CLR(peer);
  2491. }
  2492. }
  2493. /**
  2494. * dp_print_rx_rates(): Print Rx rate stats
  2495. * @vdev: DP_VDEV handle
  2496. *
  2497. * Return:void
  2498. */
  2499. static inline void
  2500. dp_print_rx_rates(struct dp_vdev *vdev)
  2501. {
  2502. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  2503. uint8_t i, pkt_type;
  2504. uint8_t index = 0;
  2505. char rx_mcs[DOT11_MAX][DP_MCS_LENGTH];
  2506. char nss[DP_NSS_LENGTH];
  2507. DP_TRACE_STATS(NONE, "Rx Rate Info:\n");
  2508. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  2509. index = 0;
  2510. for (i = 0; i < MAX_MCS; i++) {
  2511. index += qdf_snprint(&rx_mcs[pkt_type][index],
  2512. DP_MCS_LENGTH - index,
  2513. " %d ",
  2514. pdev->stats.rx.pkt_type[pkt_type].
  2515. mcs_count[i]);
  2516. }
  2517. }
  2518. DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
  2519. rx_mcs[0]);
  2520. DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
  2521. pdev->stats.rx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
  2522. DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
  2523. rx_mcs[1]);
  2524. DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
  2525. pdev->stats.rx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
  2526. DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
  2527. rx_mcs[2]);
  2528. DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
  2529. pdev->stats.rx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
2530. DP_TRACE_STATS(NONE, "11AC MCS(0-9) = %s",
  2531. rx_mcs[3]);
  2532. DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
  2533. pdev->stats.rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
  2534. DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
  2535. rx_mcs[4]);
  2536. DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
  2537. pdev->stats.rx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
  2538. index = 0;
  2539. for (i = 0; i < SS_COUNT; i++) {
  2540. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  2541. " %d", pdev->stats.rx.nss[i]);
  2542. }
  2543. DP_TRACE_STATS(NONE, "NSS(0-7) = %s",
  2544. nss);
  2545. DP_TRACE_STATS(NONE, "SGI ="
  2546. " 0.8us %d,"
  2547. " 0.4us %d,"
  2548. " 1.6us %d,"
  2549. " 3.2us %d,",
  2550. pdev->stats.rx.sgi_count[0],
  2551. pdev->stats.rx.sgi_count[1],
  2552. pdev->stats.rx.sgi_count[2],
  2553. pdev->stats.rx.sgi_count[3]);
  2554. DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  2555. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  2556. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  2557. DP_TRACE_STATS(NONE, "Reception Type ="
  2558. " SU: %d,"
  2559. " MU_MIMO:%d,"
  2560. " MU_OFDMA:%d,"
  2561. " MU_OFDMA_MIMO:%d\n",
  2562. pdev->stats.rx.reception_type[0],
  2563. pdev->stats.rx.reception_type[1],
  2564. pdev->stats.rx.reception_type[2],
  2565. pdev->stats.rx.reception_type[3]);
  2566. DP_TRACE_STATS(NONE, "Aggregation:\n");
  2567. DP_TRACE_STATS(NONE, "Number of Msdu's Part of Ampdus = %d",
  2568. pdev->stats.rx.ampdu_cnt);
  2569. DP_TRACE_STATS(NONE, "Number of Msdu's With No Mpdu Level Aggregation : %d",
  2570. pdev->stats.rx.non_ampdu_cnt);
  2571. DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu: %d",
  2572. pdev->stats.rx.amsdu_cnt);
  2573. DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
  2574. pdev->stats.rx.non_amsdu_cnt);
  2575. }
/**
 * dp_print_tx_rates(): Print tx rates
 * @vdev: DP_VDEV handle
 *
 * Return: void
 */
static inline void
dp_print_tx_rates(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	uint8_t i, pkt_type;
	char mcs[DOT11_MAX][DP_MCS_LENGTH];
	uint32_t index;

	DP_TRACE_STATS(NONE, "Tx Rate Info:\n");

	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&mcs[pkt_type][index],
					DP_MCS_LENGTH - index,
					" %d ",
					pdev->stats.tx.pkt_type[pkt_type].
					mcs_count[i]);
		}
	}

	DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
			mcs[0]);
	DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
			mcs[1]);
	DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
			mcs[2]);
	DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11AC MCS(0-9) = %s",
			mcs[3]);
	DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
			mcs[4]);
	DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
			pdev->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	DP_TRACE_STATS(NONE, "SGI ="
			" 0.8us %d"
			" 0.4us %d"
			" 1.6us %d"
			" 3.2us %d",
			pdev->stats.tx.sgi_count[0],
			pdev->stats.tx.sgi_count[1],
			pdev->stats.tx.sgi_count[2],
			pdev->stats.tx.sgi_count[3]);
	DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
			pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
			pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
	DP_TRACE_STATS(NONE, "OFDMA = %d", pdev->stats.tx.ofdma);
	DP_TRACE_STATS(NONE, "STBC = %d", pdev->stats.tx.stbc);
	DP_TRACE_STATS(NONE, "LDPC = %d", pdev->stats.tx.ldpc);
	DP_TRACE_STATS(NONE, "Retries = %d", pdev->stats.tx.retries);
	DP_TRACE_STATS(NONE, "Last ack rssi = %d\n", pdev->stats.tx.last_ack_rssi);

	DP_TRACE_STATS(NONE, "Aggregation:\n");
	DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu = %d",
			pdev->stats.tx.amsdu_cnt);
	DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation = %d",
			pdev->stats.tx.non_amsdu_cnt);
}
/**
 * dp_print_peer_stats(): Print peer stats
 * @peer: DP_PEER handle
 *
 * Return: void
 */
static inline void dp_print_peer_stats(struct dp_peer *peer)
{
	uint8_t i, pkt_type;
	char tx_mcs[DOT11_MAX][DP_MCS_LENGTH];
	char rx_mcs[DOT11_MAX][DP_MCS_LENGTH];
	uint32_t index;
	char nss[DP_NSS_LENGTH];

	DP_TRACE_STATS(NONE, "Node Tx Stats:\n");
	DP_TRACE_STATS(NONE, "Total Packet Completions = %d",
			peer->stats.tx.comp_pkt.num);
	DP_TRACE_STATS(NONE, "Total Bytes Completions = %d",
			peer->stats.tx.comp_pkt.bytes);
	DP_TRACE_STATS(NONE, "Success Packets = %d",
			peer->stats.tx.tx_success.num);
	DP_TRACE_STATS(NONE, "Success Bytes = %d",
			peer->stats.tx.tx_success.bytes);
	DP_TRACE_STATS(NONE, "Packets Failed = %d",
			peer->stats.tx.tx_failed);
	DP_TRACE_STATS(NONE, "Packets In OFDMA = %d",
			peer->stats.tx.ofdma);
	DP_TRACE_STATS(NONE, "Packets In STBC = %d",
			peer->stats.tx.stbc);
	DP_TRACE_STATS(NONE, "Packets In LDPC = %d",
			peer->stats.tx.ldpc);
	DP_TRACE_STATS(NONE, "Packet Retries = %d",
			peer->stats.tx.retries);
	DP_TRACE_STATS(NONE, "Msdu's Not Part of Amsdu = %d",
			peer->stats.tx.non_amsdu_cnt);
	DP_TRACE_STATS(NONE, "Msdu's Part of Amsdu = %d",
			peer->stats.tx.amsdu_cnt);
	DP_TRACE_STATS(NONE, "Last Packet RSSI = %d",
			peer->stats.tx.last_ack_rssi);
	DP_TRACE_STATS(NONE, "Dropped At FW: FW Discard = %d",
			peer->stats.tx.dropped.fw_discard);
	DP_TRACE_STATS(NONE, "Dropped At FW: FW Discard Retired = %d",
			peer->stats.tx.dropped.fw_discard_retired);
	DP_TRACE_STATS(NONE, "Dropped At FW: FW Discard Untransmitted = %d",
			peer->stats.tx.dropped.fw_discard_untransmitted);
	DP_TRACE_STATS(NONE, "Dropped : Mpdu Age Out = %d",
			peer->stats.tx.dropped.mpdu_age_out);
	DP_TRACE_STATS(NONE, "Dropped : FW Discard Reason1 = %d",
			peer->stats.tx.dropped.fw_discard_reason1);
	DP_TRACE_STATS(NONE, "Dropped : FW Discard Reason2 = %d",
			peer->stats.tx.dropped.fw_discard_reason2);
	DP_TRACE_STATS(NONE, "Dropped : FW Discard Reason3 = %d",
			peer->stats.tx.dropped.fw_discard_reason3);

	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&tx_mcs[pkt_type][index],
					DP_MCS_LENGTH - index,
					" %d ",
					peer->stats.tx.pkt_type[pkt_type].
					mcs_count[i]);
		}
	}

	DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
			tx_mcs[0]);
	DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
			tx_mcs[1]);
	DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
			tx_mcs[2]);
	DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11AC MCS(0-9) = %s",
			tx_mcs[3]);
	DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
			tx_mcs[4]);
	DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
			peer->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	DP_TRACE_STATS(NONE, "SGI ="
			" 0.8us %d"
			" 0.4us %d"
			" 1.6us %d"
			" 3.2us %d",
			peer->stats.tx.sgi_count[0],
			peer->stats.tx.sgi_count[1],
			peer->stats.tx.sgi_count[2],
			peer->stats.tx.sgi_count[3]);
	DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d\n",
			peer->stats.tx.bw[0], peer->stats.tx.bw[1],
			peer->stats.tx.bw[2], peer->stats.tx.bw[3]);

	DP_TRACE_STATS(NONE, "Aggregation:\n");
	DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu = %d",
			peer->stats.tx.amsdu_cnt);
	DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation = %d\n",
			peer->stats.tx.non_amsdu_cnt);
	DP_TRACE_STATS(NONE, "Node Rx Stats:\n");
	DP_TRACE_STATS(NONE, "Packets Sent To Stack = %d",
			peer->stats.rx.to_stack.num);
	DP_TRACE_STATS(NONE, "Bytes Sent To Stack = %d",
			peer->stats.rx.to_stack.bytes);
	for (i = 0; i < CDP_MAX_RX_RINGS; i++) {
		DP_TRACE_STATS(NONE, "Packets Received = %d",
				peer->stats.rx.rcvd_reo[i].num);
		DP_TRACE_STATS(NONE, "Bytes Received = %d",
				peer->stats.rx.rcvd_reo[i].bytes);
	}
	DP_TRACE_STATS(NONE, "Multicast Packets Received = %d",
			peer->stats.rx.multicast.num);
	DP_TRACE_STATS(NONE, "Multicast Bytes Received = %d",
			peer->stats.rx.multicast.bytes);
	DP_TRACE_STATS(NONE, "WDS Packets Received = %d",
			peer->stats.rx.wds.num);
	DP_TRACE_STATS(NONE, "WDS Bytes Received = %d",
			peer->stats.rx.wds.bytes);
	DP_TRACE_STATS(NONE, "Intra BSS Packets Received = %d",
			peer->stats.rx.intra_bss.pkts.num);
	DP_TRACE_STATS(NONE, "Intra BSS Bytes Received = %d",
			peer->stats.rx.intra_bss.pkts.bytes);
	DP_TRACE_STATS(NONE, "Raw Packets Received = %d",
			peer->stats.rx.raw.num);
	DP_TRACE_STATS(NONE, "Raw Bytes Received = %d",
			peer->stats.rx.raw.bytes);
	DP_TRACE_STATS(NONE, "Errors: MIC Errors = %d",
			peer->stats.rx.err.mic_err);
	DP_TRACE_STATS(NONE, "Errors: Decryption Errors = %d",
			peer->stats.rx.err.decrypt_err);
	DP_TRACE_STATS(NONE, "Msdu's Received With No Mpdu Level Aggregation = %d",
			peer->stats.rx.non_ampdu_cnt);
	DP_TRACE_STATS(NONE, "Msdu's Received As Part of Ampdu = %d",
			peer->stats.rx.ampdu_cnt);
	DP_TRACE_STATS(NONE, "Msdu's Received Not Part of Amsdu = %d",
			peer->stats.rx.non_amsdu_cnt);
	DP_TRACE_STATS(NONE, "Msdu's Received As Part of Amsdu = %d",
			peer->stats.rx.amsdu_cnt);
	DP_TRACE_STATS(NONE, "SGI ="
			" 0.8us %d"
			" 0.4us %d"
			" 1.6us %d"
			" 3.2us %d",
			peer->stats.rx.sgi_count[0],
			peer->stats.rx.sgi_count[1],
			peer->stats.rx.sgi_count[2],
			peer->stats.rx.sgi_count[3]);
	DP_TRACE_STATS(NONE, "BW Counts = 20MHZ %d 40MHZ %d 80MHZ %d 160MHZ %d",
			peer->stats.rx.bw[0], peer->stats.rx.bw[1],
			peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
	DP_TRACE_STATS(NONE, "Reception Type ="
			" SU %d,"
			" MU_MIMO %d,"
			" MU_OFDMA %d,"
			" MU_OFDMA_MIMO %d",
			peer->stats.rx.reception_type[0],
			peer->stats.rx.reception_type[1],
			peer->stats.rx.reception_type[2],
			peer->stats.rx.reception_type[3]);

	for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
		index = 0;
		for (i = 0; i < MAX_MCS; i++) {
			index += qdf_snprint(&rx_mcs[pkt_type][index],
					DP_MCS_LENGTH - index,
					" %d ",
					peer->stats.rx.pkt_type[pkt_type].
					mcs_count[i]);
		}
	}

	DP_TRACE_STATS(NONE, "11A MCS(0-7) = %s",
			rx_mcs[0]);
	DP_TRACE_STATS(NONE, "11A MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11B MCS(0-6) = %s",
			rx_mcs[1]);
	DP_TRACE_STATS(NONE, "11B MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11N MCS(0-7) = %s",
			rx_mcs[2]);
	DP_TRACE_STATS(NONE, "11N MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11AC MCS(0-9) = %s",
			rx_mcs[3]);
	DP_TRACE_STATS(NONE, "11AC MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
	DP_TRACE_STATS(NONE, "11AX MCS(0-11) = %s",
			rx_mcs[4]);
	DP_TRACE_STATS(NONE, "11AX MCS Invalid = %d",
			peer->stats.rx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);

	index = 0;
	for (i = 0; i < SS_COUNT; i++) {
		index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
				" %d", peer->stats.rx.nss[i]);
	}
	DP_TRACE_STATS(NONE, "NSS(0-7) = %s\n",
			nss);

	DP_TRACE_STATS(NONE, "Aggregation:\n");
	DP_TRACE_STATS(NONE, "Number of Msdu's Part of Ampdu = %d",
			peer->stats.rx.ampdu_cnt);
	DP_TRACE_STATS(NONE, "Number of Msdu's With No Mpdu Level Aggregation = %d",
			peer->stats.rx.non_ampdu_cnt);
	DP_TRACE_STATS(NONE, "Number of Msdu's Part of Amsdu = %d",
			peer->stats.rx.amsdu_cnt);
	DP_TRACE_STATS(NONE, "Number of Msdu's With No Msdu Level Aggregation = %d",
			peer->stats.rx.non_amsdu_cnt);
}
/**
 * dp_print_host_stats() - Function to print the stats aggregated at host
 * @vdev_handle: DP_VDEV handle
 * @req: ol_txrx_stats_req
 * @type: host stats type
 *
 * Available Stat types
 * TXRX_RX_RATE_STATS: Print Rx Rate Info
 * TXRX_TX_RATE_STATS: Print Tx Rate Info
 * TXRX_TX_HOST_STATS: Print Tx Stats
 * TXRX_RX_HOST_STATS: Print Rx Stats
 * TXRX_CLEAR_STATS: Clear the stats
 *
 * Return: 0 on success; prints an error message for an unsupported stats type
 */
static int
dp_print_host_stats(struct cdp_vdev *vdev_handle, struct ol_txrx_stats_req *req,
		enum cdp_host_txrx_stats type)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev);
		break;
	default:
		DP_TRACE(NONE, "Wrong Input For TxRx Host Stats");
		break;
	}
	return 0;
}
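
/*
 * Illustrative (hypothetical) caller of dp_print_host_stats(); shown only to
 * clarify how the requested stat type selects a print or clear path above,
 * not code that is invoked from this file:
 *
 *	struct ol_txrx_stats_req req = {0};
 *
 *	dp_print_host_stats(vdev_handle, &req, TXRX_RX_RATE_STATS);
 *	dp_print_host_stats(vdev_handle, &req, TXRX_CLEAR_STATS);
 */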
/*
 * dp_get_peer_stats() - Print stats for the peer with the given mac address
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 *
 * Return: void
 */
static void
dp_get_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
{
	struct dp_peer *peer;
	uint8_t local_id;

	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
			&local_id);

	/* Avoid dereferencing a NULL peer when the address is unknown */
	if (!peer)
		return;

	dp_print_peer_stats(peer);
}
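
/*
 * Hypothetical usage sketch for dp_get_peer_stats(); the address is assumed
 * to be a 6-byte MAC in the raw format expected by dp_find_peer_by_addr():
 *
 *	char mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x03, 0x7f, 0x01, 0x02, 0x03};
 *
 *	dp_get_peer_stats(pdev_handle, mac);
 */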
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP_VDEV handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	switch (param) {
	case CDP_ENABLE_WDS:
		vdev->wds_enabled = val;
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val;
		break;
	default:
		break;
	}

	dp_tx_vdev_update_search_flags(vdev);
}
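
/*
 * Example (illustrative only) of toggling a vdev parameter through
 * dp_set_vdev_param(); enabling WDS here also refreshes the Tx search flags
 * via dp_tx_vdev_update_search_flags() at the end of the function above:
 *
 *	dp_set_vdev_param(vdev_handle, CDP_ENABLE_WDS, 1);
 */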
/**
 * dp_peer_set_nawds: set nawds bit in peer
 * @peer_handle: pointer to peer
 * @value: enable/disable nawds
 *
 * Return: void
 */
static void dp_peer_set_nawds(void *peer_handle, uint8_t value)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	peer->nawds_enabled = value;
}
/*
 * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
 * @vdev_handle: DP_VDEV handle
 * @map_id: ID of map that needs to be updated
 *
 * Return: void
 */
static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
		uint8_t map_id)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->dscp_tid_map_id = map_id;
	return;
}
/**
 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map
 * @tid: tid value passed by the user
 *
 * Return: void
 */
static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
		uint8_t map_id, uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	pdev->dscp_tid_map[map_id][dscp] = tid;
	hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid,
			map_id, dscp);
	return;
}
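
/*
 * Worked example of the DSCP extraction above, assuming DP_IP_DSCP_SHIFT is 2
 * and DP_IP_DSCP_MASK is 0x3f (DSCP being the upper six bits of the IP TOS/DS
 * byte): tos = 0xb8 (Expedited Forwarding) gives
 * dscp = (0xb8 >> 2) & 0x3f = 46, so a hypothetical call such as
 *
 *	dp_set_pdev_dscp_tid_map_wifi3(pdev_handle, 0, 0xb8, 6);
 *
 * would steer EF-marked traffic to TID 6 in map 0.
 */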
/*
 * dp_txrx_stats() - function to map to firmware and host stats
 * @vdev: virtual handle
 * @req: statistics request handle
 * @stats: type of statistics requested
 *
 * Return: 0 on success
 */
static int dp_txrx_stats(struct cdp_vdev *vdev,
		struct ol_txrx_stats_req *req, enum cdp_stats stats)
{
	int host_stats;
	int fw_stats;

	if (stats >= CDP_TXRX_MAX_STATS)
		return 0;

	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		"stats: %u fw_stats_type: %d host_stats_type: %d",
		stats, fw_stats, host_stats);

	/* TODO: Firmware Mapping not implemented */
	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
			(host_stats <= TXRX_HOST_STATS_MAX))
		return dp_print_host_stats(vdev, req, host_stats);
	else
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			"Wrong Input for TxRx Stats");

	return 0;
}
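
/*
 * Note on the dispatch above: dp_stats_mapping_table[] is expected to carry,
 * per cdp_stats value, a firmware stats id (STATS_FW column) and a host stats
 * id (STATS_HOST column). Only the host column is acted on here; the firmware
 * id is logged and left for the pending firmware-stats implementation.
 */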
/*
 * dp_print_per_ring_stats(): Packet count per ring
 * @soc: soc handle
 */
static void dp_print_per_ring_stats(struct dp_soc *soc)
{
	uint8_t core, ring;
	uint64_t total_packets;

	DP_TRACE(NONE, "Reo packets per ring:");
	for (ring = 0; ring < MAX_REO_DEST_RINGS; ring++) {
		total_packets = 0;
		DP_TRACE(NONE, "Packets on ring %u:", ring);
		for (core = 0; core < NR_CPUS; core++) {
			DP_TRACE(NONE, "Packets arriving on core %u: %llu",
				core, soc->stats.rx.ring_packets[core][ring]);
			total_packets += soc->stats.rx.ring_packets[core][ring];
		}
		DP_TRACE(NONE, "Total packets on ring %u: %llu",
			ring, total_packets);
	}
}
/*
 * dp_txrx_path_stats() - Function to display the Tx/Rx path statistics
 * @soc: soc handle
 *
 * Return: none
 */
static void dp_txrx_path_stats(struct dp_soc *soc)
{
	uint8_t error_code;
	uint8_t loop_pdev;
	struct dp_pdev *pdev;
	uint8_t i;

	for (loop_pdev = 0; loop_pdev < soc->pdev_count; loop_pdev++) {

		pdev = soc->pdev_list[loop_pdev];
		dp_aggregate_pdev_stats(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Tx path Statistics:");

		DP_TRACE(NONE, "from stack: %u msdus (%u bytes)",
			pdev->stats.tx_i.rcvd.num,
			pdev->stats.tx_i.rcvd.bytes);
		DP_TRACE(NONE, "processed from host: %u msdus (%u bytes)",
			pdev->stats.tx_i.processed.num,
			pdev->stats.tx_i.processed.bytes);
		DP_TRACE(NONE, "successfully transmitted: %u msdus (%u bytes)",
			pdev->stats.tx.tx_success.num,
			pdev->stats.tx.tx_success.bytes);

		DP_TRACE(NONE, "Dropped in host:");
		DP_TRACE(NONE, "Total packets dropped: %u,",
			pdev->stats.tx_i.dropped.dropped_pkt.num);
		DP_TRACE(NONE, "Descriptor not available: %u",
			pdev->stats.tx_i.dropped.desc_na);
		DP_TRACE(NONE, "Ring full: %u",
			pdev->stats.tx_i.dropped.ring_full);
		DP_TRACE(NONE, "Enqueue fail: %u",
			pdev->stats.tx_i.dropped.enqueue_fail);
		DP_TRACE(NONE, "DMA Error: %u",
			pdev->stats.tx_i.dropped.dma_error);

		DP_TRACE(NONE, "Dropped in hardware:");
		DP_TRACE(NONE, "total packets dropped: %u",
			pdev->stats.tx.tx_failed);
		DP_TRACE(NONE, "mpdu age out: %u",
			pdev->stats.tx.dropped.mpdu_age_out);
		DP_TRACE(NONE, "firmware discard reason1: %u",
			pdev->stats.tx.dropped.fw_discard_reason1);
		DP_TRACE(NONE, "firmware discard reason2: %u",
			pdev->stats.tx.dropped.fw_discard_reason2);
		DP_TRACE(NONE, "firmware discard reason3: %u",
			pdev->stats.tx.dropped.fw_discard_reason3);
		DP_TRACE(NONE, "peer_invalid: %u",
			pdev->soc->stats.tx.tx_invalid_peer.num);

		DP_TRACE(NONE, "Tx packets sent per interrupt:");
		DP_TRACE(NONE, "Single Packet: %u",
			pdev->stats.tx_comp_histogram.pkts_1);
		DP_TRACE(NONE, "2-20 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_2_20);
		DP_TRACE(NONE, "21-40 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_21_40);
		DP_TRACE(NONE, "41-60 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_41_60);
		DP_TRACE(NONE, "61-80 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_61_80);
		DP_TRACE(NONE, "81-100 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_81_100);
		DP_TRACE(NONE, "101-200 Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_101_200);
		DP_TRACE(NONE, " 201+ Packets: %u",
			pdev->stats.tx_comp_histogram.pkts_201_plus);

		DP_TRACE(NONE, "Rx path statistics");
		DP_TRACE(NONE, "delivered %u msdus ( %u bytes),",
			pdev->stats.rx.to_stack.num,
			pdev->stats.rx.to_stack.bytes);
		for (i = 0; i < CDP_MAX_RX_RINGS; i++)
			DP_TRACE(NONE, "received on reo[%d] %u msdus ( %u bytes),",
				i, pdev->stats.rx.rcvd_reo[i].num,
				pdev->stats.rx.rcvd_reo[i].bytes);
		DP_TRACE(NONE, "intra-bss packets %u msdus ( %u bytes),",
			pdev->stats.rx.intra_bss.pkts.num,
			pdev->stats.rx.intra_bss.pkts.bytes);
		DP_TRACE(NONE, "raw packets %u msdus ( %u bytes),",
			pdev->stats.rx.raw.num,
			pdev->stats.rx.raw.bytes);
		DP_TRACE(NONE, "dropped: error %u msdus",
			pdev->stats.rx.err.mic_err);
		DP_TRACE(NONE, "peer invalid %u",
			pdev->soc->stats.rx.err.rx_invalid_peer.num);

		DP_TRACE(NONE, "Reo Statistics");
		DP_TRACE(NONE, "rbm error: %u msdus",
			pdev->soc->stats.rx.err.invalid_rbm);
		DP_TRACE(NONE, "hal ring access fail: %u msdus",
			pdev->soc->stats.rx.err.hal_ring_access_fail);

		DP_TRACE(NONE, "Reo errors");
		for (error_code = 0; error_code < REO_ERROR_TYPE_MAX;
				error_code++) {
			DP_TRACE(NONE, "Reo error number (%u): %u msdus",
				error_code,
				pdev->soc->stats.rx.err.reo_error[error_code]);
		}
		for (error_code = 0; error_code < MAX_RXDMA_ERRORS;
				error_code++) {
			DP_TRACE(NONE, "Rxdma error number (%u): %u msdus",
				error_code,
				pdev->soc->stats.rx.err.rxdma_error[error_code]);
		}

		DP_TRACE(NONE, "Rx packets reaped per interrupt:");
		DP_TRACE(NONE, "Single Packet: %u",
			pdev->stats.rx_ind_histogram.pkts_1);
		DP_TRACE(NONE, "2-20 Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_2_20);
		DP_TRACE(NONE, "21-40 Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_21_40);
		DP_TRACE(NONE, "41-60 Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_41_60);
		DP_TRACE(NONE, "61-80 Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_61_80);
		DP_TRACE(NONE, "81-100 Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_81_100);
		DP_TRACE(NONE, "101-200 Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_101_200);
		DP_TRACE(NONE, " 201+ Packets: %u",
			pdev->stats.rx_ind_histogram.pkts_201_plus);
	}
}
/*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: soc handle
 * @value: Statistics option
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value)
{
	struct dp_soc *soc =
		(struct dp_soc *)psoc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: soc is NULL", __func__);
		return QDF_STATUS_E_INVAL;
	}

	switch (value) {
	case CDP_TXRX_PATH_STATS:
		dp_txrx_path_stats(soc);
		break;
	case CDP_RX_RING_STATS:
		dp_print_per_ring_stats(soc);
		break;
	case CDP_TXRX_TSO_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;
	case CDP_DUMP_TX_FLOW_POOL_INFO:
		/* TODO: NOT IMPLEMENTED */
		break;
	case CDP_TXRX_DESC_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;
	default:
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;
}
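
/*
 * Illustrative use of dp_txrx_dump_stats() (hypothetical caller): dump the
 * Tx/Rx path counters and the per-ring REO packet distribution for a soc.
 *
 *	dp_txrx_dump_stats(soc, CDP_TXRX_PATH_STATS);
 *	dp_txrx_dump_stats(soc, CDP_RX_RING_STATS);
 */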
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
};

static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
	.txrx_peer_teardown = NULL,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added */
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_stats = dp_txrx_stats,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.display_stats = dp_txrx_dump_stats,
	/* TODO: Add other functions */
};
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
	/* TODO: Add other functions */
};

static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = NULL,
	.txrx_monitor_get_filter_mcast_data = NULL,
	.txrx_monitor_get_filter_non_data = NULL,
	.txrx_reset_monitor_mode = NULL,
};

static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_host_stats_get = dp_print_host_stats,
	.txrx_per_peer_stats = dp_get_peer_stats,
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef CONFIG_WIN
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */

#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
	.get_opmode = dp_get_opmode,
};

static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_ipa_ops dp_ops_ipa = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_lro_ops dp_ops_lro = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
/**
 * dp_dummy_bus_suspend() - dummy bus suspend op
 *
 * FIXME - This is a placeholder for the actual logic!
 *
 * Return: QDF_STATUS_SUCCESS
 */
inline QDF_STATUS dp_dummy_bus_suspend(void)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_dummy_bus_resume() - dummy bus resume
 *
 * FIXME - This is a placeholder for the actual logic!
 *
 * Return: QDF_STATUS_SUCCESS
 */
inline QDF_STATUS dp_dummy_bus_resume(void)
{
	return QDF_STATUS_SUCCESS;
}

static struct cdp_bus_ops dp_ops_bus = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
	.bus_suspend = dp_dummy_bus_suspend,
	.bus_resume = dp_dummy_bus_resume
};

static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
	.last_assoc_received = dp_get_last_assoc_received,
	.last_disassoc_received = dp_get_last_disassoc_received,
	.last_deauth_received = dp_get_last_deauth_received,
};
#endif

static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.ipa_ops = &dp_ops_ipa,
	.lro_ops = &dp_ops_lro,
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
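
/*
 * The ops tables above are not called directly by upper layers; they are
 * reached through the converged cdp interface once dp_soc_attach_wifi3()
 * stores &dp_txrx_ops in soc->cdp_soc.ops. A hypothetical caller-side sketch
 * (not code in this file), with 'req' and 'stats' assumed to be set up by the
 * caller:
 *
 *	struct cdp_soc_t *cdp_soc = (struct cdp_soc_t *)dp_soc;
 *
 *	cdp_soc->ops->cmn_drv_ops->txrx_stats(vdev, &req, stats);
 */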
/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @osif_soc: Opaque SOC handle from OSIF/HDD
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload-interface callbacks provided by the caller
 * @psoc: Object manager psoc handle
 *
 * Return: DP SOC handle on success, NULL on failure
 */
/*
 * Local prototype added to temporarily address warning caused by
 * -Wmissing-prototypes. A more correct solution, namely to expose
 * a prototype in an appropriate header file, will come later.
 */
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops, struct wlan_objmgr_psoc *psoc);
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops, struct wlan_objmgr_psoc *psoc)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP SOC memory allocation failed"));
		goto fail0;
	}

	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->osif_soc = osif_soc;
	soc->osdev = qdf_osdev;
	soc->hif_handle = hif_handle;
	soc->psoc = psoc;

	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, osif_soc, htc_handle,
		soc->hal_soc, qdf_osdev);
	if (!soc->htt_handle) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HTT attach failed"));
		goto fail1;
	}

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
	if (!soc->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("wlan_cfg_soc_attach failed"));
		goto fail2;
	}
	qdf_spinlock_create(&soc->peer_ref_mutex);

	if (dp_soc_interrupt_attach(soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	return (void *)soc;

fail2:
	htt_soc_detach(soc->htt_handle);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
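
/*
 * Hypothetical attach-time call from the OS interface layer, shown only to
 * illustrate the parameter order used by dp_soc_attach_wifi3() above:
 *
 *	void *dp_soc = dp_soc_attach_wifi3(osif_soc, hif_handle, htc_handle,
 *			qdf_osdev, ol_ops, psoc);
 */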