dp_peer.c 114 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786378737883789379037913792379337943795379637973798379938003801380238033804380538063807380838093810381138123813381438153816381738183819382038213822382338243825382638273828382938303831383
238333834383538363837383838393840384138423843384438453846384738483849385038513852385338543855385638573858385938603861386238633864386538663867386838693870387138723873387438753876387738783879388038813882388338843885388638873888388938903891389238933894389538963897389838993900390139023903390439053906390739083909391039113912391339143915391639173918391939203921392239233924392539263927392839293930393139323933393439353936393739383939394039413942394339443945394639473948394939503951395239533954395539563957395839593960396139623963396439653966396739683969397039713972397339743975397639773978397939803981398239833984398539863987398839893990399139923993399439953996399739983999400040014002400340044005400640074008400940104011401240134014401540164017401840194020402140224023402440254026402740284029403040314032403340344035403640374038403940404041404240434044404540464047404840494050405140524053405440554056405740584059406040614062406340644065406640674068406940704071407240734074407540764077407840794080408140824083408440854086408740884089409040914092409340944095409640974098409941004101410241034104410541064107410841094110411141124113411441154116411741184119412041214122412341244125412641274128412941304131413241334134413541364137413841394140414141424143414441454146414741484149415041514152415341544155415641574158415941604161416241634164416541664167416841694170417141724173417441754176417741784179418041814182418341844185418641874188418941904191419241934194419541964197419841994200420142024203420442054206420742084209421042114212421342144215421642174218421942204221422242234224422542264227422842294230423142324233423442354236423742384239424042414242424342444245424642474248424942504251425242534254425542564257425842594260426142624263426442654266426742684269427042714272427342744275427642774278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317
  1. /*
  2. * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <qdf_types.h>
  19. #include <qdf_lock.h>
  20. #include <hal_hw_headers.h>
  21. #include "dp_htt.h"
  22. #include "dp_types.h"
  23. #include "dp_internal.h"
  24. #include "dp_peer.h"
  25. #include "dp_rx_defrag.h"
  26. #include "dp_rx.h"
  27. #include <hal_api.h>
  28. #include <hal_reo.h>
  29. #include <cdp_txrx_handle.h>
  30. #include <wlan_cfg.h>
  31. #ifdef FEATURE_WDS
  32. #include "dp_txrx_wds.h"
  33. #endif
  34. #ifdef WLAN_TX_PKT_CAPTURE_ENH
  35. #include "dp_tx_capture.h"
  36. #endif
  37. #ifdef QCA_PEER_EXT_STATS
  38. #include "dp_hist.h"
  39. #endif
  40. #ifdef FEATURE_WDS
  41. static inline bool
  42. dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
  43. struct dp_ast_entry *ast_entry)
  44. {
  45. /* if peer map v2 is enabled we are not freeing ast entry
  46. * here and it is supposed to be freed in unmap event (after
  47. * we receive delete confirmation from target)
  48. *
  49. * if peer_id is invalid we did not get the peer map event
  50. * for the peer free ast entry from here only in this case
  51. */
  52. if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
  53. (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
  54. return true;
  55. return false;
  56. }
  57. #else
  58. static inline bool
  59. dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
  60. struct dp_ast_entry *ast_entry)
  61. {
  62. return false;
  63. }
  64. #endif
  65. static inline void
  66. dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
  67. uint8_t valid)
  68. {
  69. params->u.upd_queue_params.update_svld = 1;
  70. params->u.upd_queue_params.svld = valid;
  71. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  72. "%s: Setting SSN valid bit to %d",
  73. __func__, valid);
  74. }
  75. static inline int dp_peer_find_mac_addr_cmp(
  76. union dp_align_mac_addr *mac_addr1,
  77. union dp_align_mac_addr *mac_addr2)
  78. {
  79. /*
  80. * Intentionally use & rather than &&.
  81. * because the operands are binary rather than generic boolean,
  82. * the functionality is equivalent.
  83. * Using && has the advantage of short-circuited evaluation,
  84. * but using & has the advantage of no conditional branching,
  85. * which is a more significant benefit.
  86. */
  87. return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
  88. & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
  89. }
  90. static int dp_peer_ast_table_attach(struct dp_soc *soc)
  91. {
  92. uint32_t max_ast_index;
  93. max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
  94. /* allocate ast_table for ast entry to ast_index map */
  95. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  96. "\n<=== cfg max ast idx %d ====>", max_ast_index);
  97. soc->ast_table = qdf_mem_malloc(max_ast_index *
  98. sizeof(struct dp_ast_entry *));
  99. if (!soc->ast_table) {
  100. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  101. "%s: ast_table memory allocation failed", __func__);
  102. return QDF_STATUS_E_NOMEM;
  103. }
  104. return 0; /* success */
  105. }
  106. /*
  107. * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
  108. * @soc: soc handle
  109. *
  110. * return: none
  111. */
  112. static int dp_peer_find_map_attach(struct dp_soc *soc)
  113. {
  114. uint32_t max_peers, peer_map_size;
  115. max_peers = soc->max_peers;
  116. /* allocate the peer ID -> peer object map */
  117. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  118. "\n<=== cfg max peer id %d ====>", max_peers);
  119. peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
  120. soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
  121. if (!soc->peer_id_to_obj_map) {
  122. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  123. "%s: peer map memory allocation failed", __func__);
  124. return QDF_STATUS_E_NOMEM;
  125. }
  126. /*
  127. * The peer_id_to_obj_map doesn't really need to be initialized,
  128. * since elements are only used after they have been individually
  129. * initialized.
  130. * However, it is convenient for debugging to have all elements
  131. * that are not in use set to 0.
  132. */
  133. qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
  134. qdf_spinlock_create(&soc->peer_map_lock);
  135. return 0; /* success */
  136. }
  137. static int dp_log2_ceil(unsigned int value)
  138. {
  139. unsigned int tmp = value;
  140. int log2 = -1;
  141. while (tmp) {
  142. log2++;
  143. tmp >>= 1;
  144. }
  145. if (1 << log2 != value)
  146. log2++;
  147. return log2;
  148. }
  149. #define DP_PEER_HASH_LOAD_MULT 2
  150. #define DP_PEER_HASH_LOAD_SHIFT 0
  151. #define DP_AST_HASH_LOAD_MULT 2
  152. #define DP_AST_HASH_LOAD_SHIFT 0
  153. /*
  154. * dp_peer_find_hash_attach() - allocate memory for peer_hash table
  155. * @soc: soc handle
  156. *
  157. * return: none
  158. */
  159. static int dp_peer_find_hash_attach(struct dp_soc *soc)
  160. {
  161. int i, hash_elems, log2;
  162. /* allocate the peer MAC address -> peer object hash table */
  163. hash_elems = soc->max_peers;
  164. hash_elems *= DP_PEER_HASH_LOAD_MULT;
  165. hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
  166. log2 = dp_log2_ceil(hash_elems);
  167. hash_elems = 1 << log2;
  168. soc->peer_hash.mask = hash_elems - 1;
  169. soc->peer_hash.idx_bits = log2;
  170. /* allocate an array of TAILQ peer object lists */
  171. soc->peer_hash.bins = qdf_mem_malloc(
  172. hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
  173. if (!soc->peer_hash.bins)
  174. return QDF_STATUS_E_NOMEM;
  175. for (i = 0; i < hash_elems; i++)
  176. TAILQ_INIT(&soc->peer_hash.bins[i]);
  177. qdf_spinlock_create(&soc->peer_hash_lock);
  178. return 0;
  179. }
  180. /*
  181. * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
  182. * @soc: soc handle
  183. *
  184. * return: none
  185. */
  186. static void dp_peer_find_hash_detach(struct dp_soc *soc)
  187. {
  188. if (soc->peer_hash.bins) {
  189. qdf_mem_free(soc->peer_hash.bins);
  190. soc->peer_hash.bins = NULL;
  191. qdf_spinlock_destroy(&soc->peer_hash_lock);
  192. }
  193. }
  194. static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
  195. union dp_align_mac_addr *mac_addr)
  196. {
  197. unsigned index;
  198. index =
  199. mac_addr->align2.bytes_ab ^
  200. mac_addr->align2.bytes_cd ^
  201. mac_addr->align2.bytes_ef;
  202. index ^= index >> soc->peer_hash.idx_bits;
  203. index &= soc->peer_hash.mask;
  204. return index;
  205. }
  206. /*
  207. * dp_peer_find_hash_add() - add peer to peer_hash_table
  208. * @soc: soc handle
  209. * @peer: peer handle
  210. *
  211. * return: none
  212. */
  213. void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
  214. {
  215. unsigned index;
  216. index = dp_peer_find_hash_index(soc, &peer->mac_addr);
  217. qdf_spin_lock_bh(&soc->peer_hash_lock);
  218. if (dp_peer_get_ref(soc, peer) != QDF_STATUS_SUCCESS) {
  219. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  220. "unable to get peer reference at MAP mac %pM",
  221. peer ? peer->mac_addr.raw : NULL);
  222. qdf_spin_unlock_bh(&soc->peer_hash_lock);
  223. return;
  224. }
  225. /*
  226. * It is important to add the new peer at the tail of the peer list
  227. * with the bin index. Together with having the hash_find function
  228. * search from head to tail, this ensures that if two entries with
  229. * the same MAC address are stored, the one added first will be
  230. * found first.
  231. */
  232. TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
  233. qdf_spin_unlock_bh(&soc->peer_hash_lock);
  234. }
  235. /*
  236. * dp_peer_vdev_list_add() - add peer into vdev list
  237. * @soc: soc handle
  238. * @vdev: vdev handle
  239. * @peer: peer handle
  240. *
  241. * return: none
  242. */
  243. void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
  244. struct dp_peer *peer)
  245. {
  246. qdf_spin_lock_bh(&vdev->peer_list_lock);
  247. if (dp_peer_get_ref(soc, peer) != QDF_STATUS_SUCCESS) {
  248. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  249. "unable to get peer reference at MAP mac %pM",
  250. peer ? peer->mac_addr.raw : NULL);
  251. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  252. return;
  253. }
  254. /* add this peer into the vdev's list */
  255. if (wlan_op_mode_sta == vdev->opmode)
  256. TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
  257. else
  258. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  259. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  260. }
  261. /*
  262. * dp_peer_vdev_list_remove() - remove peer from vdev list
  263. * @soc: SoC handle
  264. * @vdev: VDEV handle
  265. * @peer: peer handle
  266. *
  267. * Return: none
  268. */
  269. void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
  270. struct dp_peer *peer)
  271. {
  272. uint8_t found = 0;
  273. struct dp_peer *tmppeer = NULL;
  274. qdf_spin_lock_bh(&vdev->peer_list_lock);
  275. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  276. if (tmppeer == peer) {
  277. found = 1;
  278. break;
  279. }
  280. }
  281. if (found) {
  282. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  283. peer_list_elem);
  284. dp_peer_unref_delete(peer);
  285. } else {
  286. /*Ignoring the remove operation as peer not found*/
  287. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  288. "peer:%pK not found in vdev:%pK peerlist:%pK",
  289. peer, vdev, &peer->vdev->peer_list);
  290. }
  291. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  292. }
  293. /*
  294. * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
  295. * @soc: SoC handle
  296. * @peer: peer handle
  297. * @peer_id: peer_id
  298. *
  299. * Return: None
  300. */
  301. void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
  302. struct dp_peer *peer,
  303. uint16_t peer_id)
  304. {
  305. QDF_ASSERT(peer_id <= soc->max_peers);
  306. qdf_spin_lock_bh(&soc->peer_map_lock);
  307. if (dp_peer_get_ref(soc, peer) != QDF_STATUS_SUCCESS) {
  308. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  309. "unable to get peer reference at MAP mac %pM peer_id %u",
  310. peer ? peer->mac_addr.raw : NULL, peer_id);
  311. qdf_spin_unlock_bh(&soc->peer_map_lock);
  312. return;
  313. }
  314. if (!soc->peer_id_to_obj_map[peer_id]) {
  315. soc->peer_id_to_obj_map[peer_id] = peer;
  316. } else {
  317. /* Peer map event came for peer_id which
  318. * is already mapped, this is not expected
  319. */
  320. QDF_ASSERT(0);
  321. }
  322. qdf_spin_unlock_bh(&soc->peer_map_lock);
  323. }
  324. /*
  325. * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
  326. * @soc: SoC handle
  327. * @peer_id: peer_id
  328. *
  329. * Return: None
  330. */
  331. void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
  332. uint16_t peer_id)
  333. {
  334. struct dp_peer *peer = NULL;
  335. QDF_ASSERT(peer_id <= soc->max_peers);
  336. qdf_spin_lock_bh(&soc->peer_map_lock);
  337. peer = soc->peer_id_to_obj_map[peer_id];
  338. soc->peer_id_to_obj_map[peer_id] = NULL;
  339. dp_peer_unref_delete(peer);
  340. qdf_spin_unlock_bh(&soc->peer_map_lock);
  341. }
  342. /*
  343. * dp_peer_exist_on_pdev - check if peer with mac address exist on pdev
  344. *
  345. * @soc: Datapath SOC handle
  346. * @peer_mac_addr: peer mac address
  347. * @mac_addr_is_aligned: is mac address aligned
  348. * @pdev: Datapath PDEV handle
  349. *
  350. * Return: true if peer found else return false
  351. */
  352. static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
  353. uint8_t *peer_mac_addr,
  354. int mac_addr_is_aligned,
  355. struct dp_pdev *pdev)
  356. {
  357. union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
  358. unsigned int index;
  359. struct dp_peer *peer;
  360. bool found = false;
  361. if (mac_addr_is_aligned) {
  362. mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
  363. } else {
  364. qdf_mem_copy(
  365. &local_mac_addr_aligned.raw[0],
  366. peer_mac_addr, QDF_MAC_ADDR_SIZE);
  367. mac_addr = &local_mac_addr_aligned;
  368. }
  369. index = dp_peer_find_hash_index(soc, mac_addr);
  370. qdf_spin_lock_bh(&soc->peer_hash_lock);
  371. TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
  372. if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
  373. (peer->vdev->pdev == pdev)) {
  374. found = true;
  375. break;
  376. }
  377. }
  378. qdf_spin_unlock_bh(&soc->peer_hash_lock);
  379. return found;
  380. }
  381. #ifdef FEATURE_AST
  382. /*
  383. * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
  384. * @soc: SoC handle
  385. *
  386. * Return: None
  387. */
  388. static int dp_peer_ast_hash_attach(struct dp_soc *soc)
  389. {
  390. int i, hash_elems, log2;
  391. unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
  392. hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
  393. DP_AST_HASH_LOAD_SHIFT);
  394. log2 = dp_log2_ceil(hash_elems);
  395. hash_elems = 1 << log2;
  396. soc->ast_hash.mask = hash_elems - 1;
  397. soc->ast_hash.idx_bits = log2;
  398. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  399. "ast hash_elems: %d, max_ast_idx: %d",
  400. hash_elems, max_ast_idx);
  401. /* allocate an array of TAILQ peer object lists */
  402. soc->ast_hash.bins = qdf_mem_malloc(
  403. hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
  404. dp_ast_entry)));
  405. if (!soc->ast_hash.bins)
  406. return QDF_STATUS_E_NOMEM;
  407. for (i = 0; i < hash_elems; i++)
  408. TAILQ_INIT(&soc->ast_hash.bins[i]);
  409. return 0;
  410. }
  411. /*
  412. * dp_peer_ast_cleanup() - cleanup the references
  413. * @soc: SoC handle
  414. * @ast: ast entry
  415. *
  416. * Return: None
  417. */
  418. static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
  419. struct dp_ast_entry *ast)
  420. {
  421. txrx_ast_free_cb cb = ast->callback;
  422. void *cookie = ast->cookie;
  423. /* Call the callbacks to free up the cookie */
  424. if (cb) {
  425. ast->callback = NULL;
  426. ast->cookie = NULL;
  427. cb(soc->ctrl_psoc,
  428. dp_soc_to_cdp_soc(soc),
  429. cookie,
  430. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  431. }
  432. }
  433. /*
  434. * dp_peer_ast_hash_detach() - Free AST Hash table
  435. * @soc: SoC handle
  436. *
  437. * Return: None
  438. */
  439. static void dp_peer_ast_hash_detach(struct dp_soc *soc)
  440. {
  441. unsigned int index;
  442. struct dp_ast_entry *ast, *ast_next;
  443. if (!soc->ast_hash.mask)
  444. return;
  445. if (!soc->ast_hash.bins)
  446. return;
  447. qdf_spin_lock_bh(&soc->ast_lock);
  448. for (index = 0; index <= soc->ast_hash.mask; index++) {
  449. if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
  450. TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
  451. hash_list_elem, ast_next) {
  452. TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
  453. hash_list_elem);
  454. dp_peer_ast_cleanup(soc, ast);
  455. soc->num_ast_entries--;
  456. qdf_mem_free(ast);
  457. }
  458. }
  459. }
  460. qdf_spin_unlock_bh(&soc->ast_lock);
  461. qdf_mem_free(soc->ast_hash.bins);
  462. soc->ast_hash.bins = NULL;
  463. }
  464. /*
  465. * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
  466. * @soc: SoC handle
  467. *
  468. * Return: AST hash
  469. */
  470. static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
  471. union dp_align_mac_addr *mac_addr)
  472. {
  473. uint32_t index;
  474. index =
  475. mac_addr->align2.bytes_ab ^
  476. mac_addr->align2.bytes_cd ^
  477. mac_addr->align2.bytes_ef;
  478. index ^= index >> soc->ast_hash.idx_bits;
  479. index &= soc->ast_hash.mask;
  480. return index;
  481. }
  482. /*
  483. * dp_peer_ast_hash_add() - Add AST entry into hash table
  484. * @soc: SoC handle
  485. *
  486. * This function adds the AST entry into SoC AST hash table
  487. * It assumes caller has taken the ast lock to protect the access to this table
  488. *
  489. * Return: None
  490. */
  491. static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
  492. struct dp_ast_entry *ase)
  493. {
  494. uint32_t index;
  495. index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
  496. TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
  497. }
  498. /*
  499. * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
  500. * @soc: SoC handle
  501. *
  502. * This function removes the AST entry from soc AST hash table
  503. * It assumes caller has taken the ast lock to protect the access to this table
  504. *
  505. * Return: None
  506. */
  507. void dp_peer_ast_hash_remove(struct dp_soc *soc,
  508. struct dp_ast_entry *ase)
  509. {
  510. unsigned index;
  511. struct dp_ast_entry *tmpase;
  512. int found = 0;
  513. index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
  514. /* Check if tail is not empty before delete*/
  515. QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
  516. TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
  517. if (tmpase == ase) {
  518. found = 1;
  519. break;
  520. }
  521. }
  522. QDF_ASSERT(found);
  523. TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
  524. }
  525. /*
  526. * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
  527. * @soc: SoC handle
  528. * @peer: peer handle
  529. * @ast_mac_addr: mac address
  530. *
  531. * It assumes caller has taken the ast lock to protect the access to ast list
  532. *
  533. * Return: AST entry
  534. */
  535. struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
  536. struct dp_peer *peer,
  537. uint8_t *ast_mac_addr)
  538. {
  539. struct dp_ast_entry *ast_entry = NULL;
  540. union dp_align_mac_addr *mac_addr =
  541. (union dp_align_mac_addr *)ast_mac_addr;
  542. TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
  543. if (!dp_peer_find_mac_addr_cmp(mac_addr,
  544. &ast_entry->mac_addr)) {
  545. return ast_entry;
  546. }
  547. }
  548. return NULL;
  549. }
  550. /*
  551. * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
  552. * @soc: SoC handle
  553. *
  554. * It assumes caller has taken the ast lock to protect the access to
  555. * AST hash table
  556. *
  557. * Return: AST entry
  558. */
  559. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  560. uint8_t *ast_mac_addr,
  561. uint8_t pdev_id)
  562. {
  563. union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
  564. uint32_t index;
  565. struct dp_ast_entry *ase;
  566. qdf_mem_copy(&local_mac_addr_aligned.raw[0],
  567. ast_mac_addr, QDF_MAC_ADDR_SIZE);
  568. mac_addr = &local_mac_addr_aligned;
  569. index = dp_peer_ast_hash_index(soc, mac_addr);
  570. TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
  571. if ((pdev_id == ase->pdev_id) &&
  572. !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
  573. return ase;
  574. }
  575. }
  576. return NULL;
  577. }
  578. /*
  579. * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
  580. * @soc: SoC handle
  581. *
  582. * It assumes caller has taken the ast lock to protect the access to
  583. * AST hash table
  584. *
  585. * Return: AST entry
  586. */
  587. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  588. uint8_t *ast_mac_addr)
  589. {
  590. union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
  591. unsigned index;
  592. struct dp_ast_entry *ase;
  593. qdf_mem_copy(&local_mac_addr_aligned.raw[0],
  594. ast_mac_addr, QDF_MAC_ADDR_SIZE);
  595. mac_addr = &local_mac_addr_aligned;
  596. index = dp_peer_ast_hash_index(soc, mac_addr);
  597. TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
  598. if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
  599. return ase;
  600. }
  601. }
  602. return NULL;
  603. }
  604. /*
  605. * dp_peer_map_ast() - Map the ast entry with HW AST Index
  606. * @soc: SoC handle
  607. * @peer: peer to which ast node belongs
  608. * @mac_addr: MAC address of ast node
  609. * @hw_peer_id: HW AST Index returned by target in peer map event
  610. * @vdev_id: vdev id for VAP to which the peer belongs to
  611. * @ast_hash: ast hash value in HW
  612. * @is_wds: flag to indicate peer map event for WDS ast entry
  613. *
  614. * Return: QDF_STATUS code
  615. */
  616. static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
  617. struct dp_peer *peer,
  618. uint8_t *mac_addr,
  619. uint16_t hw_peer_id,
  620. uint8_t vdev_id,
  621. uint16_t ast_hash,
  622. uint8_t is_wds)
  623. {
  624. struct dp_ast_entry *ast_entry = NULL;
  625. enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
  626. void *cookie = NULL;
  627. txrx_ast_free_cb cb = NULL;
  628. QDF_STATUS err = QDF_STATUS_SUCCESS;
  629. if (!peer) {
  630. return QDF_STATUS_E_INVAL;
  631. }
  632. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  633. "%s: peer %pK ID %d vid %d mac %pM",
  634. __func__, peer, hw_peer_id, vdev_id, mac_addr);
  635. qdf_spin_lock_bh(&soc->ast_lock);
  636. ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
  637. if (is_wds) {
  638. /*
  639. * In certain cases like Auth attack on a repeater
  640. * can result in the number of ast_entries falling
  641. * in the same hash bucket to exceed the max_skid
  642. * length supported by HW in root AP. In these cases
  643. * the FW will return the hw_peer_id (ast_index) as
  644. * 0xffff indicating HW could not add the entry in
  645. * its table. Host has to delete the entry from its
  646. * table in these cases.
  647. */
  648. if (hw_peer_id == HTT_INVALID_PEER) {
  649. DP_STATS_INC(soc, ast.map_err, 1);
  650. if (ast_entry) {
  651. if (ast_entry->is_mapped) {
  652. soc->ast_table[ast_entry->ast_idx] =
  653. NULL;
  654. }
  655. cb = ast_entry->callback;
  656. cookie = ast_entry->cookie;
  657. peer_type = ast_entry->type;
  658. dp_peer_unlink_ast_entry(soc, ast_entry);
  659. dp_peer_free_ast_entry(soc, ast_entry);
  660. qdf_spin_unlock_bh(&soc->ast_lock);
  661. if (cb) {
  662. cb(soc->ctrl_psoc,
  663. dp_soc_to_cdp_soc(soc),
  664. cookie,
  665. CDP_TXRX_AST_DELETED);
  666. }
  667. } else {
  668. qdf_spin_unlock_bh(&soc->ast_lock);
  669. dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
  670. peer, peer->peer_id,
  671. peer->mac_addr.raw, mac_addr,
  672. vdev_id, is_wds);
  673. }
  674. err = QDF_STATUS_E_INVAL;
  675. dp_hmwds_ast_add_notify(peer, mac_addr,
  676. peer_type, err, true);
  677. return err;
  678. }
  679. }
  680. if (ast_entry) {
  681. ast_entry->ast_idx = hw_peer_id;
  682. soc->ast_table[hw_peer_id] = ast_entry;
  683. ast_entry->is_active = TRUE;
  684. peer_type = ast_entry->type;
  685. ast_entry->ast_hash_value = ast_hash;
  686. ast_entry->is_mapped = TRUE;
  687. }
  688. if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
  689. if (soc->cdp_soc.ol_ops->peer_map_event) {
  690. soc->cdp_soc.ol_ops->peer_map_event(
  691. soc->ctrl_psoc, peer->peer_id,
  692. hw_peer_id, vdev_id,
  693. mac_addr, peer_type, ast_hash);
  694. }
  695. } else {
  696. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  697. "AST entry not found");
  698. err = QDF_STATUS_E_NOENT;
  699. }
  700. qdf_spin_unlock_bh(&soc->ast_lock);
  701. dp_hmwds_ast_add_notify(peer, mac_addr,
  702. peer_type, err, true);
  703. return err;
  704. }
  705. void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  706. struct cdp_soc *dp_soc,
  707. void *cookie,
  708. enum cdp_ast_free_status status)
  709. {
  710. struct dp_ast_free_cb_params *param =
  711. (struct dp_ast_free_cb_params *)cookie;
  712. struct dp_soc *soc = (struct dp_soc *)dp_soc;
  713. struct dp_peer *peer = NULL;
  714. QDF_STATUS err = QDF_STATUS_SUCCESS;
  715. if (status != CDP_TXRX_AST_DELETED) {
  716. qdf_mem_free(cookie);
  717. return;
  718. }
  719. peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
  720. 0, param->vdev_id);
  721. if (peer) {
  722. err = dp_peer_add_ast(soc, peer,
  723. &param->mac_addr.raw[0],
  724. param->type,
  725. param->flags);
  726. dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
  727. param->type, err, false);
  728. dp_peer_unref_delete(peer);
  729. }
  730. qdf_mem_free(cookie);
  731. }
  732. /*
  733. * dp_peer_add_ast() - Allocate and add AST entry into peer list
  734. * @soc: SoC handle
  735. * @peer: peer to which ast node belongs
  736. * @mac_addr: MAC address of ast node
  737. * @is_self: Is this base AST entry with peer mac address
  738. *
  739. * This API is used by WDS source port learning function to
  740. * add a new AST entry into peer AST list
  741. *
  742. * Return: QDF_STATUS code
  743. */
  744. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
  745. struct dp_peer *peer,
  746. uint8_t *mac_addr,
  747. enum cdp_txrx_ast_entry_type type,
  748. uint32_t flags)
  749. {
  750. struct dp_ast_entry *ast_entry = NULL;
  751. struct dp_vdev *vdev = NULL;
  752. struct dp_pdev *pdev = NULL;
  753. uint8_t next_node_mac[6];
  754. txrx_ast_free_cb cb = NULL;
  755. void *cookie = NULL;
  756. struct dp_peer *vap_bss_peer = NULL;
  757. bool is_peer_found = false;
  758. vdev = peer->vdev;
  759. if (!vdev) {
  760. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  761. FL("Peers vdev is NULL"));
  762. QDF_ASSERT(0);
  763. return QDF_STATUS_E_INVAL;
  764. }
  765. pdev = vdev->pdev;
  766. is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
  767. qdf_spin_lock_bh(&soc->ast_lock);
  768. if (peer->delete_in_progress) {
  769. qdf_spin_unlock_bh(&soc->ast_lock);
  770. return QDF_STATUS_E_BUSY;
  771. }
  772. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  773. "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
  774. __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
  775. peer->mac_addr.raw, peer, mac_addr);
  776. /* fw supports only 2 times the max_peers ast entries */
  777. if (soc->num_ast_entries >=
  778. wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
  779. qdf_spin_unlock_bh(&soc->ast_lock);
  780. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  781. FL("Max ast entries reached"));
  782. return QDF_STATUS_E_RESOURCES;
  783. }
  784. /* If AST entry already exists , just return from here
  785. * ast entry with same mac address can exist on different radios
  786. * if ast_override support is enabled use search by pdev in this
  787. * case
  788. */
  789. if (soc->ast_override_support) {
  790. ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
  791. pdev->pdev_id);
  792. if (ast_entry) {
  793. if ((type == CDP_TXRX_AST_TYPE_MEC) &&
  794. (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
  795. ast_entry->is_active = TRUE;
  796. qdf_spin_unlock_bh(&soc->ast_lock);
  797. return QDF_STATUS_E_ALREADY;
  798. }
  799. if (is_peer_found) {
  800. /* During WDS to static roaming, peer is added
  801. * to the list before static AST entry create.
  802. * So, allow AST entry for STATIC type
  803. * even if peer is present
  804. */
  805. if (type != CDP_TXRX_AST_TYPE_STATIC) {
  806. qdf_spin_unlock_bh(&soc->ast_lock);
  807. return QDF_STATUS_E_ALREADY;
  808. }
  809. }
  810. } else {
  811. /* For HWMWDS_SEC entries can be added for same mac address
  812. * do not check for existing entry
  813. */
  814. if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
  815. goto add_ast_entry;
  816. ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
  817. if (ast_entry) {
  818. if ((type == CDP_TXRX_AST_TYPE_MEC) &&
  819. (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
  820. ast_entry->is_active = TRUE;
  821. if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
  822. !ast_entry->delete_in_progress) {
  823. qdf_spin_unlock_bh(&soc->ast_lock);
  824. return QDF_STATUS_E_ALREADY;
  825. }
  826. /* Add for HMWDS entry we cannot be ignored if there
  827. * is AST entry with same mac address
  828. *
  829. * if ast entry exists with the requested mac address
  830. * send a delete command and register callback which
  831. * can take care of adding HMWDS ast enty on delete
  832. * confirmation from target
  833. */
  834. if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
  835. struct dp_ast_free_cb_params *param = NULL;
  836. if (ast_entry->type ==
  837. CDP_TXRX_AST_TYPE_WDS_HM_SEC)
  838. goto add_ast_entry;
  839. /* save existing callback */
  840. if (ast_entry->callback) {
  841. cb = ast_entry->callback;
  842. cookie = ast_entry->cookie;
  843. }
  844. param = qdf_mem_malloc(sizeof(*param));
  845. if (!param) {
  846. QDF_TRACE(QDF_MODULE_ID_TXRX,
  847. QDF_TRACE_LEVEL_ERROR,
  848. "Allocation failed");
  849. qdf_spin_unlock_bh(&soc->ast_lock);
  850. return QDF_STATUS_E_NOMEM;
  851. }
  852. qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
  853. QDF_MAC_ADDR_SIZE);
  854. qdf_mem_copy(&param->peer_mac_addr.raw[0],
  855. &peer->mac_addr.raw[0],
  856. QDF_MAC_ADDR_SIZE);
  857. param->type = type;
  858. param->flags = flags;
  859. param->vdev_id = vdev->vdev_id;
  860. ast_entry->callback = dp_peer_free_hmwds_cb;
  861. ast_entry->pdev_id = vdev->pdev->pdev_id;
  862. ast_entry->type = type;
  863. ast_entry->cookie = (void *)param;
  864. if (!ast_entry->delete_in_progress)
  865. dp_peer_del_ast(soc, ast_entry);
  866. qdf_spin_unlock_bh(&soc->ast_lock);
  867. /* Call the saved callback*/
  868. if (cb) {
  869. cb(soc->ctrl_psoc,
  870. dp_soc_to_cdp_soc(soc),
  871. cookie,
  872. CDP_TXRX_AST_DELETE_IN_PROGRESS);
  873. }
  874. return QDF_STATUS_E_AGAIN;
  875. }
  876. /* Modify an already existing AST entry from type
  877. * WDS to MEC on promption. This serves as a fix when
  878. * backbone of interfaces are interchanged wherein
  879. * wds entr becomes its own MEC. The entry should be
  880. * replaced only when the ast_entry peer matches the
  881. * peer received in mec event. This additional check
  882. * is needed in wds repeater cases where a multicast
  883. * packet from station to the root via the repeater
  884. * should not remove the wds entry.
  885. */
  886. if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
  887. (type == CDP_TXRX_AST_TYPE_MEC) &&
  888. (ast_entry->peer == peer)) {
  889. ast_entry->is_active = FALSE;
  890. dp_peer_del_ast(soc, ast_entry);
  891. }
  892. qdf_spin_unlock_bh(&soc->ast_lock);
  893. return QDF_STATUS_E_ALREADY;
  894. }
  895. }
  896. add_ast_entry:
  897. ast_entry = (struct dp_ast_entry *)
  898. qdf_mem_malloc(sizeof(struct dp_ast_entry));
  899. if (!ast_entry) {
  900. qdf_spin_unlock_bh(&soc->ast_lock);
  901. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  902. FL("fail to allocate ast_entry"));
  903. QDF_ASSERT(0);
  904. return QDF_STATUS_E_NOMEM;
  905. }
  906. qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
  907. ast_entry->pdev_id = vdev->pdev->pdev_id;
  908. ast_entry->is_mapped = false;
  909. ast_entry->delete_in_progress = false;
  910. switch (type) {
  911. case CDP_TXRX_AST_TYPE_STATIC:
  912. peer->self_ast_entry = ast_entry;
  913. ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
  914. if (peer->vdev->opmode == wlan_op_mode_sta)
  915. ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
  916. break;
  917. case CDP_TXRX_AST_TYPE_SELF:
  918. peer->self_ast_entry = ast_entry;
  919. ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
  920. break;
  921. case CDP_TXRX_AST_TYPE_WDS:
  922. ast_entry->next_hop = 1;
  923. ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
  924. break;
  925. case CDP_TXRX_AST_TYPE_WDS_HM:
  926. ast_entry->next_hop = 1;
  927. ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
  928. break;
  929. case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
  930. ast_entry->next_hop = 1;
  931. ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
  932. break;
  933. case CDP_TXRX_AST_TYPE_MEC:
  934. ast_entry->next_hop = 1;
  935. ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
  936. break;
  937. case CDP_TXRX_AST_TYPE_DA:
  938. vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev);
  939. if (!vap_bss_peer) {
  940. qdf_spin_unlock_bh(&soc->ast_lock);
  941. qdf_mem_free(ast_entry);
  942. return QDF_STATUS_E_FAILURE;
  943. }
  944. peer = vap_bss_peer;
  945. ast_entry->next_hop = 1;
  946. ast_entry->type = CDP_TXRX_AST_TYPE_DA;
  947. break;
  948. default:
  949. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  950. FL("Incorrect AST entry type"));
  951. }
  952. ast_entry->is_active = TRUE;
  953. DP_STATS_INC(soc, ast.added, 1);
  954. soc->num_ast_entries++;
  955. dp_peer_ast_hash_add(soc, ast_entry);
  956. ast_entry->peer = peer;
  957. if (type == CDP_TXRX_AST_TYPE_MEC)
  958. qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
  959. else
  960. qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);
  961. TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
  962. if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
  963. (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
  964. (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
  965. (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
  966. if (QDF_STATUS_SUCCESS ==
  967. soc->cdp_soc.ol_ops->peer_add_wds_entry(
  968. soc->ctrl_psoc,
  969. peer->vdev->vdev_id,
  970. peer->mac_addr.raw,
  971. peer->peer_id,
  972. mac_addr,
  973. next_node_mac,
  974. flags,
  975. ast_entry->type)) {
  976. if (vap_bss_peer)
  977. dp_peer_unref_delete(vap_bss_peer);
  978. qdf_spin_unlock_bh(&soc->ast_lock);
  979. return QDF_STATUS_SUCCESS;
  980. }
  981. }
  982. if (vap_bss_peer)
  983. dp_peer_unref_delete(vap_bss_peer);
  984. qdf_spin_unlock_bh(&soc->ast_lock);
  985. return QDF_STATUS_E_FAILURE;
  986. }
  987. /*
  988. * dp_peer_free_ast_entry() - Free up the ast entry memory
  989. * @soc: SoC handle
  990. * @ast_entry: Address search entry
  991. *
  992. * This API is used to free up the memory associated with
  993. * AST entry.
  994. *
  995. * Return: None
  996. */
  997. void dp_peer_free_ast_entry(struct dp_soc *soc,
  998. struct dp_ast_entry *ast_entry)
  999. {
  1000. /*
  1001. * NOTE: Ensure that call to this API is done
  1002. * after soc->ast_lock is taken
  1003. */
  1004. ast_entry->callback = NULL;
  1005. ast_entry->cookie = NULL;
  1006. DP_STATS_INC(soc, ast.deleted, 1);
  1007. dp_peer_ast_hash_remove(soc, ast_entry);
  1008. dp_peer_ast_cleanup(soc, ast_entry);
  1009. qdf_mem_free(ast_entry);
  1010. soc->num_ast_entries--;
  1011. }
  1012. /*
  1013. * dp_peer_unlink_ast_entry() - Free up the ast entry memory
  1014. * @soc: SoC handle
  1015. * @ast_entry: Address search entry
  1016. *
  1017. * This API is used to remove/unlink AST entry from the peer list
  1018. * and hash list.
  1019. *
  1020. * Return: None
  1021. */
  1022. void dp_peer_unlink_ast_entry(struct dp_soc *soc,
  1023. struct dp_ast_entry *ast_entry)
  1024. {
  1025. /*
  1026. * NOTE: Ensure that call to this API is done
  1027. * after soc->ast_lock is taken
  1028. */
  1029. struct dp_peer *peer = ast_entry->peer;
  1030. TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
  1031. if (ast_entry == peer->self_ast_entry)
  1032. peer->self_ast_entry = NULL;
  1033. /*
  1034. * release the reference only if it is mapped
  1035. * to ast_table
  1036. */
  1037. if (ast_entry->is_mapped)
  1038. soc->ast_table[ast_entry->ast_idx] = NULL;
  1039. ast_entry->peer = NULL;
  1040. }
  1041. /*
  1042. * dp_peer_del_ast() - Delete and free AST entry
  1043. * @soc: SoC handle
  1044. * @ast_entry: AST entry of the node
  1045. *
  1046. * This function removes the AST entry from peer and soc tables
  1047. * It assumes caller has taken the ast lock to protect the access to these
  1048. * tables
  1049. *
  1050. * Return: None
  1051. */
  1052. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
  1053. {
  1054. struct dp_peer *peer;
  1055. if (!ast_entry)
  1056. return;
  1057. if (ast_entry->delete_in_progress)
  1058. return;
  1059. ast_entry->delete_in_progress = true;
  1060. peer = ast_entry->peer;
  1061. dp_peer_ast_send_wds_del(soc, ast_entry);
  1062. /* Remove SELF and STATIC entries in teardown itself */
  1063. if (!ast_entry->next_hop)
  1064. dp_peer_unlink_ast_entry(soc, ast_entry);
  1065. if (ast_entry->is_mapped)
  1066. soc->ast_table[ast_entry->ast_idx] = NULL;
  1067. /* if peer map v2 is enabled we are not freeing ast entry
  1068. * here and it is supposed to be freed in unmap event (after
  1069. * we receive delete confirmation from target)
  1070. *
  1071. * if peer_id is invalid we did not get the peer map event
  1072. * for the peer free ast entry from here only in this case
  1073. */
  1074. if (dp_peer_ast_free_in_unmap_supported(peer, ast_entry))
  1075. return;
  1076. /* for WDS secondary entry ast_entry->next_hop would be set so
  1077. * unlinking has to be done explicitly here.
  1078. * As this entry is not a mapped entry unmap notification from
  1079. * FW wil not come. Hence unlinkling is done right here.
  1080. */
  1081. if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
  1082. dp_peer_unlink_ast_entry(soc, ast_entry);
  1083. dp_peer_free_ast_entry(soc, ast_entry);
  1084. }
  1085. /*
  1086. * dp_peer_update_ast() - Delete and free AST entry
  1087. * @soc: SoC handle
  1088. * @peer: peer to which ast node belongs
  1089. * @ast_entry: AST entry of the node
  1090. * @flags: wds or hmwds
  1091. *
  1092. * This function update the AST entry to the roamed peer and soc tables
  1093. * It assumes caller has taken the ast lock to protect the access to these
  1094. * tables
  1095. *
  1096. * Return: 0 if ast entry is updated successfully
  1097. * -1 failure
  1098. */
  1099. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  1100. struct dp_ast_entry *ast_entry, uint32_t flags)
  1101. {
  1102. int ret = -1;
  1103. struct dp_peer *old_peer;
  1104. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  1105. "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
  1106. __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
  1107. peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
  1108. peer->mac_addr.raw);
  1109. /* Do not send AST update in below cases
  1110. * 1) Ast entry delete has already triggered
  1111. * 2) Peer delete is already triggered
  1112. * 3) We did not get the HTT map for create event
  1113. */
  1114. if (ast_entry->delete_in_progress || peer->delete_in_progress ||
  1115. !ast_entry->is_mapped)
  1116. return ret;
  1117. if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
  1118. (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
  1119. (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
  1120. (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
  1121. return 0;
  1122. /*
  1123. * Avoids flood of WMI update messages sent to FW for same peer.
  1124. */
  1125. if (qdf_unlikely(ast_entry->peer == peer) &&
  1126. (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
  1127. (ast_entry->peer->vdev == peer->vdev) &&
  1128. (ast_entry->is_active))
  1129. return 0;
  1130. old_peer = ast_entry->peer;
  1131. TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
  1132. ast_entry->peer = peer;
  1133. ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
  1134. ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
  1135. ast_entry->is_active = TRUE;
  1136. TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
  1137. ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
  1138. soc->ctrl_psoc,
  1139. peer->vdev->vdev_id,
  1140. ast_entry->mac_addr.raw,
  1141. peer->mac_addr.raw,
  1142. flags);
  1143. return ret;
  1144. }
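/*
 * Illustrative usage sketch (not part of the driver): per the note above,
 * the caller is expected to hold soc->ast_lock across the lookup and the
 * update. A hypothetical roaming handler might therefore look like:
 *
 *   qdf_spin_lock_bh(&soc->ast_lock);
 *   ast_entry = dp_peer_ast_hash_find_soc(soc, wds_mac);
 *   if (ast_entry)
 *       ret = dp_peer_update_ast(soc, new_peer, ast_entry, flags);
 *   qdf_spin_unlock_bh(&soc->ast_lock);
 *
 * wds_mac, new_peer and flags are assumed caller-provided values.
 */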
  1145. /*
  1146. * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
  1147. * @soc: SoC handle
  1148. * @ast_entry: AST entry of the node
  1149. *
  1150. * This function gets the pdev_id from the ast entry.
  1151. *
  1152. * Return: (uint8_t) pdev_id
  1153. */
  1154. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  1155. struct dp_ast_entry *ast_entry)
  1156. {
  1157. return ast_entry->pdev_id;
  1158. }
  1159. /*
  1160. * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
  1161. * @soc: SoC handle
  1162. * @ast_entry: AST entry of the node
  1163. *
  1164. * This function gets the next hop from the ast entry.
  1165. *
  1166. * Return: (uint8_t) next_hop
  1167. */
  1168. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  1169. struct dp_ast_entry *ast_entry)
  1170. {
  1171. return ast_entry->next_hop;
  1172. }
  1173. /*
  1174. * dp_peer_ast_set_type() - set type from the ast entry
  1175. * @soc: SoC handle
  1176. * @ast_entry: AST entry of the node
  1177. *
  1178. * This function sets the type in the ast entry.
  1179. *
  1180. * Return:
  1181. */
  1182. void dp_peer_ast_set_type(struct dp_soc *soc,
  1183. struct dp_ast_entry *ast_entry,
  1184. enum cdp_txrx_ast_entry_type type)
  1185. {
  1186. ast_entry->type = type;
  1187. }
  1188. #else
  1189. QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
  1190. struct dp_peer *peer,
  1191. uint8_t *mac_addr,
  1192. enum cdp_txrx_ast_entry_type type,
  1193. uint32_t flags)
  1194. {
  1195. return QDF_STATUS_E_FAILURE;
  1196. }
  1197. void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
  1198. {
  1199. }
  1200. int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
  1201. struct dp_ast_entry *ast_entry, uint32_t flags)
  1202. {
  1203. return 1;
  1204. }
  1205. struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
  1206. uint8_t *ast_mac_addr)
  1207. {
  1208. return NULL;
  1209. }
  1210. struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
  1211. uint8_t *ast_mac_addr,
  1212. uint8_t pdev_id)
  1213. {
  1214. return NULL;
  1215. }
  1216. static int dp_peer_ast_hash_attach(struct dp_soc *soc)
  1217. {
  1218. return 0;
  1219. }
  1220. static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
  1221. struct dp_peer *peer,
  1222. uint8_t *mac_addr,
  1223. uint16_t hw_peer_id,
  1224. uint8_t vdev_id,
  1225. uint16_t ast_hash,
  1226. uint8_t is_wds)
  1227. {
  1228. return QDF_STATUS_SUCCESS;
  1229. }
  1230. static void dp_peer_ast_hash_detach(struct dp_soc *soc)
  1231. {
  1232. }
  1233. void dp_peer_ast_set_type(struct dp_soc *soc,
  1234. struct dp_ast_entry *ast_entry,
  1235. enum cdp_txrx_ast_entry_type type)
  1236. {
  1237. }
  1238. uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
  1239. struct dp_ast_entry *ast_entry)
  1240. {
  1241. return 0xff;
  1242. }
  1243. uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
  1244. struct dp_ast_entry *ast_entry)
  1245. {
  1246. return 0xff;
  1247. }
  1253. #endif
  1254. void dp_peer_ast_send_wds_del(struct dp_soc *soc,
  1255. struct dp_ast_entry *ast_entry)
  1256. {
  1257. struct dp_peer *peer = ast_entry->peer;
  1258. struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
  1259. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
  1260. "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
  1261. __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
  1262. peer->vdev->vdev_id, ast_entry->mac_addr.raw,
  1263. ast_entry->next_hop, ast_entry->peer->mac_addr.raw);
  1264. /*
  1265. * If peer delete_in_progress is set, the peer is about to get
1266. * torn down with a peer delete command to firmware,
1267. * which will clean up all the wds ast entries.
  1268. * So, no need to send explicit wds ast delete to firmware.
  1269. */
  1270. if (ast_entry->next_hop) {
  1271. cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
  1272. peer->vdev->vdev_id,
  1273. ast_entry->mac_addr.raw,
  1274. ast_entry->type,
  1275. !peer->delete_in_progress);
  1276. }
  1277. }
  1278. #ifdef FEATURE_WDS
  1279. /**
  1280. * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
  1281. * @soc: soc handle
  1282. * @peer: peer handle
  1283. *
  1284. * Free all the wds ast entries associated with peer
  1285. *
  1286. * Return: Number of wds ast entries freed
  1287. */
  1288. static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
  1289. struct dp_peer *peer)
  1290. {
  1291. TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
  1292. struct dp_ast_entry *ast_entry, *temp_ast_entry;
  1293. uint32_t num_ast = 0;
  1294. TAILQ_INIT(&ast_local_list);
  1295. qdf_spin_lock_bh(&soc->ast_lock);
  1296. DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
  1297. if (ast_entry->next_hop) {
  1298. if (ast_entry->is_mapped)
  1299. soc->ast_table[ast_entry->ast_idx] = NULL;
  1300. dp_peer_unlink_ast_entry(soc, ast_entry);
  1301. DP_STATS_INC(soc, ast.deleted, 1);
  1302. dp_peer_ast_hash_remove(soc, ast_entry);
  1303. TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
  1304. ase_list_elem);
  1305. soc->num_ast_entries--;
  1306. num_ast++;
  1307. }
  1308. }
  1309. qdf_spin_unlock_bh(&soc->ast_lock);
  1310. TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
  1311. temp_ast_entry) {
  1312. if (ast_entry->callback)
  1313. ast_entry->callback(soc->ctrl_psoc,
  1314. dp_soc_to_cdp_soc(soc),
  1315. ast_entry->cookie,
  1316. CDP_TXRX_AST_DELETED);
  1317. qdf_mem_free(ast_entry);
  1318. }
  1319. return num_ast;
  1320. }
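/*
 * Note on the function above: entries are unlinked and parked on a local
 * list while soc->ast_lock is held, and the free callbacks plus
 * qdf_mem_free() run only after the lock is dropped, presumably so the
 * callbacks never execute under the spinlock. A minimal sketch of the same
 * idiom (illustrative only, generic names):
 *
 *   TAILQ_HEAD(, dp_ast_entry) local_list = {0};
 *
 *   TAILQ_INIT(&local_list);
 *   qdf_spin_lock_bh(&soc->ast_lock);
 *   ...unlink entries and TAILQ_INSERT_TAIL(&local_list, entry, ase_list_elem)...
 *   qdf_spin_unlock_bh(&soc->ast_lock);
 *   TAILQ_FOREACH_SAFE(entry, &local_list, ase_list_elem, tmp)
 *       ...invoke callback, then qdf_mem_free(entry)...
 */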
  1321. /**
  1322. * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
  1323. * @soc: soc handle
  1324. * @peer: peer handle
1325. * @free_wds_count: number of wds entries freed by FW with peer delete
  1326. *
  1327. * Free all the wds ast entries associated with peer and compare with
  1328. * the value received from firmware
  1329. *
1330. * Return: None
  1331. */
  1332. static void
  1333. dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
  1334. uint32_t free_wds_count)
  1335. {
  1336. uint32_t wds_deleted = 0;
  1337. wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
  1338. if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
  1339. (free_wds_count != wds_deleted)) {
  1340. DP_STATS_INC(soc, ast.ast_mismatch, 1);
  1341. dp_alert("For peer %pK (mac: %pM)number of wds entries deleted by fw = %d during peer delete is not same as the numbers deleted by host = %d",
  1342. peer, peer->mac_addr.raw, free_wds_count,
  1343. wds_deleted);
  1344. }
  1345. }
  1346. #else
  1347. static void
  1348. dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
  1349. uint32_t free_wds_count)
  1350. {
  1351. }
  1352. #endif
  1353. /**
  1354. * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
  1355. * @soc: soc handle
  1356. * @peer: peer handle
1357. * @mac_addr: mac address of the AST entry to search and delete
  1358. *
  1359. * find the ast entry from the peer list using the mac address and free
  1360. * the entry.
  1361. *
  1362. * Return: SUCCESS or NOENT
  1363. */
  1364. static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
  1365. struct dp_peer *peer,
  1366. uint8_t *mac_addr)
  1367. {
  1368. struct dp_ast_entry *ast_entry;
  1369. void *cookie = NULL;
  1370. txrx_ast_free_cb cb = NULL;
  1371. /*
  1372. * release the reference only if it is mapped
  1373. * to ast_table
  1374. */
  1375. qdf_spin_lock_bh(&soc->ast_lock);
  1376. ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
  1377. if (!ast_entry) {
  1378. qdf_spin_unlock_bh(&soc->ast_lock);
  1379. return QDF_STATUS_E_NOENT;
  1380. } else if (ast_entry->is_mapped) {
  1381. soc->ast_table[ast_entry->ast_idx] = NULL;
  1382. }
  1383. cb = ast_entry->callback;
  1384. cookie = ast_entry->cookie;
  1385. dp_peer_unlink_ast_entry(soc, ast_entry);
  1386. dp_peer_free_ast_entry(soc, ast_entry);
  1387. qdf_spin_unlock_bh(&soc->ast_lock);
  1388. if (cb) {
  1389. cb(soc->ctrl_psoc,
  1390. dp_soc_to_cdp_soc(soc),
  1391. cookie,
  1392. CDP_TXRX_AST_DELETED);
  1393. }
  1394. return QDF_STATUS_SUCCESS;
  1395. }
  1396. /*
  1397. * dp_peer_find_hash_find() - returns peer from peer_hash_table matching
  1398. * vdev_id and mac_address
  1399. * @soc: soc handle
  1400. * @peer_mac_addr: peer mac address
1401. * @mac_addr_is_aligned: is mac addr aligned
  1402. * @vdev_id: vdev_id
  1403. *
1404. * Return: peer on success
1405. * NULL on failure
  1406. */
  1407. struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
  1408. uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
  1409. {
  1410. union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
  1411. unsigned index;
  1412. struct dp_peer *peer;
  1413. if (mac_addr_is_aligned) {
  1414. mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
  1415. } else {
  1416. qdf_mem_copy(
  1417. &local_mac_addr_aligned.raw[0],
  1418. peer_mac_addr, QDF_MAC_ADDR_SIZE);
  1419. mac_addr = &local_mac_addr_aligned;
  1420. }
  1421. index = dp_peer_find_hash_index(soc, mac_addr);
  1422. qdf_spin_lock_bh(&soc->peer_hash_lock);
  1423. TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
  1424. if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
  1425. ((peer->vdev->vdev_id == vdev_id) ||
  1426. (vdev_id == DP_VDEV_ALL))) {
  1427. /* take peer reference before returning */
  1428. if (dp_peer_get_ref(soc, peer) != QDF_STATUS_SUCCESS)
  1429. peer = NULL;
  1430. qdf_spin_unlock_bh(&soc->peer_hash_lock);
  1431. return peer;
  1432. }
  1433. }
  1434. qdf_spin_unlock_bh(&soc->peer_hash_lock);
  1435. return NULL; /* failure */
  1436. }
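/*
 * Illustrative sketch (not part of the driver): dp_peer_find_hash_find()
 * returns the peer with a reference already taken via dp_peer_get_ref(),
 * so every successful caller must balance it, e.g.
 *
 *   peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
 *   if (peer) {
 *       ...use peer...
 *       dp_peer_unref_delete(peer);
 *   }
 *
 * soc, mac and vdev_id are assumed caller-provided values.
 */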
  1437. /*
  1438. * dp_peer_find_hash_remove() - remove peer from peer_hash_table
  1439. * @soc: soc handle
  1440. * @peer: peer handle
  1441. *
  1442. * return: none
  1443. */
  1444. void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
  1445. {
  1446. unsigned index;
  1447. struct dp_peer *tmppeer = NULL;
  1448. int found = 0;
  1449. index = dp_peer_find_hash_index(soc, &peer->mac_addr);
1450. /* Check that the hash bin list is not empty before delete */
  1451. QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
  1452. qdf_spin_lock_bh(&soc->peer_hash_lock);
  1453. TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
  1454. if (tmppeer == peer) {
  1455. found = 1;
  1456. break;
  1457. }
  1458. }
  1459. QDF_ASSERT(found);
  1460. TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
  1461. dp_peer_unref_delete(peer);
  1462. qdf_spin_unlock_bh(&soc->peer_hash_lock);
  1463. }
  1464. void dp_peer_find_hash_erase(struct dp_soc *soc)
  1465. {
  1466. int i;
  1467. /*
  1468. * Not really necessary to take peer_ref_mutex lock - by this point,
  1469. * it's known that the soc is no longer in use.
  1470. */
  1471. for (i = 0; i <= soc->peer_hash.mask; i++) {
  1472. if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
  1473. struct dp_peer *peer, *peer_next;
  1474. /*
  1475. * TAILQ_FOREACH_SAFE must be used here to avoid any
  1476. * memory access violation after peer is freed
  1477. */
  1478. TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
  1479. hash_list_elem, peer_next) {
  1480. /*
  1481. * Don't remove the peer from the hash table -
  1482. * that would modify the list we are currently
  1483. * traversing, and it's not necessary anyway.
  1484. */
  1485. /*
  1486. * Artificially adjust the peer's ref count to
  1487. * 1, so it will get deleted by
  1488. * dp_peer_unref_delete.
  1489. */
  1490. /* set to zero */
  1491. qdf_atomic_init(&peer->ref_cnt);
  1492. /* incr to one */
  1493. qdf_atomic_inc(&peer->ref_cnt);
  1494. dp_peer_unref_delete(peer);
  1495. }
  1496. }
  1497. }
  1498. }
  1499. static void dp_peer_ast_table_detach(struct dp_soc *soc)
  1500. {
  1501. if (soc->ast_table) {
  1502. qdf_mem_free(soc->ast_table);
  1503. soc->ast_table = NULL;
  1504. }
  1505. }
  1506. /*
  1507. * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
  1508. * @soc: soc handle
  1509. *
  1510. * return: none
  1511. */
  1512. static void dp_peer_find_map_detach(struct dp_soc *soc)
  1513. {
  1514. if (soc->peer_id_to_obj_map) {
  1515. qdf_mem_free(soc->peer_id_to_obj_map);
  1516. soc->peer_id_to_obj_map = NULL;
  1517. qdf_spinlock_destroy(&soc->peer_map_lock);
  1518. }
  1519. }
  1520. int dp_peer_find_attach(struct dp_soc *soc)
  1521. {
  1522. if (dp_peer_find_map_attach(soc))
  1523. return 1;
  1524. if (dp_peer_find_hash_attach(soc)) {
  1525. dp_peer_find_map_detach(soc);
  1526. return 1;
  1527. }
  1528. if (dp_peer_ast_table_attach(soc)) {
  1529. dp_peer_find_hash_detach(soc);
  1530. dp_peer_find_map_detach(soc);
  1531. return 1;
  1532. }
  1533. if (dp_peer_ast_hash_attach(soc)) {
  1534. dp_peer_ast_table_detach(soc);
  1535. dp_peer_find_hash_detach(soc);
  1536. dp_peer_find_map_detach(soc);
  1537. return 1;
  1538. }
  1539. return 0; /* success */
  1540. }
  1541. void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
  1542. union hal_reo_status *reo_status)
  1543. {
  1544. struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
  1545. struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
  1546. if (queue_status->header.status == HAL_REO_CMD_DRAIN)
  1547. return;
  1548. if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
  1549. DP_PRINT_STATS("REO stats failure %d for TID %d\n",
  1550. queue_status->header.status, rx_tid->tid);
  1551. return;
  1552. }
  1553. DP_PRINT_STATS("REO queue stats (TID: %d):\n"
  1554. "ssn: %d\n"
  1555. "curr_idx : %d\n"
  1556. "pn_31_0 : %08x\n"
  1557. "pn_63_32 : %08x\n"
  1558. "pn_95_64 : %08x\n"
  1559. "pn_127_96 : %08x\n"
  1560. "last_rx_enq_tstamp : %08x\n"
  1561. "last_rx_deq_tstamp : %08x\n"
  1562. "rx_bitmap_31_0 : %08x\n"
  1563. "rx_bitmap_63_32 : %08x\n"
  1564. "rx_bitmap_95_64 : %08x\n"
  1565. "rx_bitmap_127_96 : %08x\n"
  1566. "rx_bitmap_159_128 : %08x\n"
  1567. "rx_bitmap_191_160 : %08x\n"
  1568. "rx_bitmap_223_192 : %08x\n"
  1569. "rx_bitmap_255_224 : %08x\n",
  1570. rx_tid->tid,
  1571. queue_status->ssn, queue_status->curr_idx,
  1572. queue_status->pn_31_0, queue_status->pn_63_32,
  1573. queue_status->pn_95_64, queue_status->pn_127_96,
  1574. queue_status->last_rx_enq_tstamp,
  1575. queue_status->last_rx_deq_tstamp,
  1576. queue_status->rx_bitmap_31_0,
  1577. queue_status->rx_bitmap_63_32,
  1578. queue_status->rx_bitmap_95_64,
  1579. queue_status->rx_bitmap_127_96,
  1580. queue_status->rx_bitmap_159_128,
  1581. queue_status->rx_bitmap_191_160,
  1582. queue_status->rx_bitmap_223_192,
  1583. queue_status->rx_bitmap_255_224);
  1584. DP_PRINT_STATS(
  1585. "curr_mpdu_cnt : %d\n"
  1586. "curr_msdu_cnt : %d\n"
  1587. "fwd_timeout_cnt : %d\n"
  1588. "fwd_bar_cnt : %d\n"
  1589. "dup_cnt : %d\n"
  1590. "frms_in_order_cnt : %d\n"
  1591. "bar_rcvd_cnt : %d\n"
  1592. "mpdu_frms_cnt : %d\n"
  1593. "msdu_frms_cnt : %d\n"
  1594. "total_byte_cnt : %d\n"
  1595. "late_recv_mpdu_cnt : %d\n"
  1596. "win_jump_2k : %d\n"
  1597. "hole_cnt : %d\n",
  1598. queue_status->curr_mpdu_cnt,
  1599. queue_status->curr_msdu_cnt,
  1600. queue_status->fwd_timeout_cnt,
  1601. queue_status->fwd_bar_cnt,
  1602. queue_status->dup_cnt,
  1603. queue_status->frms_in_order_cnt,
  1604. queue_status->bar_rcvd_cnt,
  1605. queue_status->mpdu_frms_cnt,
  1606. queue_status->msdu_frms_cnt,
  1607. queue_status->total_cnt,
  1608. queue_status->late_recv_mpdu_cnt,
  1609. queue_status->win_jump_2k,
  1610. queue_status->hole_cnt);
  1611. DP_PRINT_STATS("Addba Req : %d\n"
  1612. "Addba Resp : %d\n"
  1613. "Addba Resp success : %d\n"
  1614. "Addba Resp failed : %d\n"
  1615. "Delba Req received : %d\n"
  1616. "Delba Tx success : %d\n"
  1617. "Delba Tx Fail : %d\n"
  1618. "BA window size : %d\n"
  1619. "Pn size : %d\n",
  1620. rx_tid->num_of_addba_req,
  1621. rx_tid->num_of_addba_resp,
  1622. rx_tid->num_addba_rsp_success,
  1623. rx_tid->num_addba_rsp_failed,
  1624. rx_tid->num_of_delba_req,
  1625. rx_tid->delba_tx_success_cnt,
  1626. rx_tid->delba_tx_fail_cnt,
  1627. rx_tid->ba_win_size,
  1628. rx_tid->pn_size);
  1629. }
  1630. /*
  1631. * dp_peer_find_add_id() - map peer_id with peer
  1632. * @soc: soc handle
  1633. * @peer_mac_addr: peer mac address
  1634. * @peer_id: peer id to be mapped
  1635. * @hw_peer_id: HW ast index
  1636. * @vdev_id: vdev_id
  1637. *
1638. * Return: peer on success
1639. * NULL on failure
  1640. */
  1641. static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
  1642. uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
  1643. uint8_t vdev_id)
  1644. {
  1645. struct dp_peer *peer;
  1646. QDF_ASSERT(peer_id <= soc->max_peers);
  1647. /* check if there's already a peer object with this MAC address */
  1648. peer = dp_peer_find_hash_find(soc, peer_mac_addr,
  1649. 0 /* is aligned */, vdev_id);
  1650. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1651. "%s: peer %pK ID %d vid %d mac %pM",
  1652. __func__, peer, peer_id, vdev_id, peer_mac_addr);
  1653. if (peer) {
  1654. /* peer's ref count was already incremented by
  1655. * peer_find_hash_find
  1656. */
  1657. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1658. "%s: ref_cnt: %d", __func__,
  1659. qdf_atomic_read(&peer->ref_cnt));
  1660. dp_peer_find_id_to_obj_add(soc, peer, peer_id);
  1661. if (peer->peer_id == HTT_INVALID_PEER) {
  1662. peer->peer_id = peer_id;
  1663. dp_peer_tid_peer_id_update(peer, peer->peer_id);
  1664. } else {
  1665. QDF_ASSERT(0);
  1666. }
  1667. return peer;
  1668. }
  1669. return NULL;
  1670. }
  1671. /**
  1672. * dp_rx_peer_map_handler() - handle peer map event from firmware
1673. * @soc: generic soc handle
1674. * @peer_id: peer_id from firmware
1675. * @hw_peer_id: ast index for this peer
1676. * @vdev_id: vdev ID
1677. * @peer_mac_addr: mac address of the peer
1678. * @ast_hash: ast hash value
1679. * @is_wds: flag to indicate peer map event for WDS ast entry
  1680. *
  1681. * associate the peer_id that firmware provided with peer entry
  1682. * and update the ast table in the host with the hw_peer_id.
  1683. *
  1684. * Return: QDF_STATUS code
  1685. */
  1686. QDF_STATUS
  1687. dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
  1688. uint16_t hw_peer_id, uint8_t vdev_id,
  1689. uint8_t *peer_mac_addr, uint16_t ast_hash,
  1690. uint8_t is_wds)
  1691. {
  1692. struct dp_peer *peer = NULL;
  1693. enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
  1694. QDF_STATUS err = QDF_STATUS_SUCCESS;
  1695. dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d",
  1696. soc, peer_id, hw_peer_id,
  1697. peer_mac_addr, vdev_id);
1698. /* For a peer map event for a WDS ast entry, get the peer from
1699. * the obj map
  1700. */
  1701. if (is_wds) {
  1702. peer = dp_peer_find_by_id(soc, peer_id);
  1703. err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
  1704. vdev_id, ast_hash, is_wds);
  1705. if (peer)
  1706. dp_peer_unref_delete(peer);
  1707. } else {
  1708. /*
  1709. * It's the responsibility of the CP and FW to ensure
  1710. * that peer is created successfully. Ideally DP should
1711. * not hit the below condition for directly associated
  1712. * peers.
  1713. */
  1714. if ((hw_peer_id < 0) ||
  1715. (hw_peer_id >=
  1716. wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
  1717. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1718. "invalid hw_peer_id: %d", hw_peer_id);
  1719. qdf_assert_always(0);
  1720. }
  1721. peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
  1722. hw_peer_id, vdev_id);
  1723. if (peer) {
  1724. if (wlan_op_mode_sta == peer->vdev->opmode &&
  1725. qdf_mem_cmp(peer->mac_addr.raw,
  1726. peer->vdev->mac_addr.raw,
  1727. QDF_MAC_ADDR_SIZE) != 0) {
  1728. dp_info("STA vdev bss_peer!!!!");
  1729. peer->bss_peer = 1;
  1730. }
  1731. if (peer->vdev->opmode == wlan_op_mode_sta) {
  1732. peer->vdev->bss_ast_hash = ast_hash;
  1733. peer->vdev->bss_ast_idx = hw_peer_id;
  1734. }
1735. /* Add ast entry in case self ast entry is
  1736. * deleted due to DP CP sync issue
  1737. *
  1738. * self_ast_entry is modified in peer create
  1739. * and peer unmap path which cannot run in
1740. * parallel with peer map, so no lock is needed before
1741. * referring to it
  1742. */
  1743. if (!peer->self_ast_entry) {
  1744. dp_info("Add self ast from map %pM",
  1745. peer_mac_addr);
  1746. dp_peer_add_ast(soc, peer,
  1747. peer_mac_addr,
  1748. type, 0);
  1749. }
  1750. }
  1751. err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
  1752. vdev_id, ast_hash, is_wds);
  1753. }
  1754. return err;
  1755. }
  1756. /**
  1757. * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
1758. * @soc: generic soc handle
1759. * @peer_id: peer_id from firmware
1760. * @vdev_id: vdev ID
1761. * @mac_addr: mac address of the peer or wds entry
1762. * @is_wds: flag to indicate peer unmap event for WDS ast entry
1763. * @free_wds_count: number of wds entries freed by FW with peer delete
  1764. *
  1765. * Return: none
  1766. */
  1767. void
  1768. dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
  1769. uint8_t vdev_id, uint8_t *mac_addr,
  1770. uint8_t is_wds, uint32_t free_wds_count)
  1771. {
  1772. struct dp_peer *peer;
  1773. struct dp_vdev *vdev = NULL;
  1774. peer = __dp_peer_find_by_id(soc, peer_id);
  1775. /*
  1776. * Currently peer IDs are assigned for vdevs as well as peers.
  1777. * If the peer ID is for a vdev, then the peer pointer stored
  1778. * in peer_id_to_obj_map will be NULL.
  1779. */
  1780. if (!peer) {
  1781. dp_err("Received unmap event for invalid peer_id %u",
  1782. peer_id);
  1783. return;
  1784. }
1785. /* If V2 peer map messages are enabled, the AST entry has to be freed here
  1786. */
  1787. if (is_wds) {
  1788. if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr)) {
  1789. return;
  1790. }
  1791. dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
  1792. peer, peer->peer_id,
  1793. peer->mac_addr.raw, mac_addr, vdev_id,
  1794. is_wds);
  1795. return;
  1796. } else {
  1797. dp_peer_clean_wds_entries(soc, peer, free_wds_count);
  1798. }
  1799. dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
  1800. soc, peer_id, peer);
  1801. dp_peer_find_id_to_obj_remove(soc, peer_id);
  1802. peer->peer_id = HTT_INVALID_PEER;
  1803. /*
  1804. * Reset ast flow mapping table
  1805. */
  1806. dp_peer_reset_flowq_map(peer);
  1807. if (soc->cdp_soc.ol_ops->peer_unmap_event) {
  1808. soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
  1809. peer_id, vdev_id);
  1810. }
  1811. vdev = peer->vdev;
  1812. /* cleanup the peer data */
  1813. dp_peer_cleanup(vdev, peer);
  1814. DP_UPDATE_STATS(vdev, peer);
  1815. qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
  1816. TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer, inactive_list_elem);
  1817. qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
  1818. /*
  1819. * Remove a reference to the peer.
  1820. * If there are no more references, delete the peer object.
  1821. */
  1822. dp_peer_unref_delete(peer);
  1823. }
  1824. void
  1825. dp_peer_find_detach(struct dp_soc *soc)
  1826. {
  1827. dp_peer_find_map_detach(soc);
  1828. dp_peer_find_hash_detach(soc);
  1829. dp_peer_ast_hash_detach(soc);
  1830. dp_peer_ast_table_detach(soc);
  1831. }
  1832. static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
  1833. union hal_reo_status *reo_status)
  1834. {
  1835. struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
  1836. if ((reo_status->rx_queue_status.header.status !=
  1837. HAL_REO_CMD_SUCCESS) &&
  1838. (reo_status->rx_queue_status.header.status !=
  1839. HAL_REO_CMD_DRAIN)) {
  1840. /* Should not happen normally. Just print error for now */
  1841. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1842. "%s: Rx tid HW desc update failed(%d): tid %d",
  1843. __func__,
  1844. reo_status->rx_queue_status.header.status,
  1845. rx_tid->tid);
  1846. }
  1847. }
  1848. /*
  1849. * dp_find_peer_by_addr - find peer instance by mac address
  1850. * @dev: physical device instance
  1851. * @peer_mac_addr: peer mac address
  1852. *
  1853. * Return: peer instance pointer
  1854. */
  1855. void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
  1856. {
  1857. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  1858. struct dp_peer *peer;
  1859. peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
  1860. if (!peer)
  1861. return NULL;
  1862. dp_verbose_debug("peer %pK mac: %pM", peer,
  1863. peer->mac_addr.raw);
  1864. /* ref_cnt is incremented inside dp_peer_find_hash_find().
  1865. * Decrement it here.
  1866. */
  1867. dp_peer_unref_delete(peer);
  1868. return peer;
  1869. }
  1870. static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
  1871. {
  1872. struct ol_if_ops *ol_ops = NULL;
  1873. bool is_roaming = false;
  1874. uint8_t vdev_id = -1;
  1875. struct cdp_soc_t *soc;
  1876. if (!peer) {
  1877. dp_info("Peer is NULL. No roaming possible");
  1878. return false;
  1879. }
  1880. soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
  1881. ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
  1882. if (ol_ops && ol_ops->is_roam_inprogress) {
  1883. dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
  1884. is_roaming = ol_ops->is_roam_inprogress(vdev_id);
  1885. }
  1886. dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
  1887. peer->mac_addr.raw, vdev_id, is_roaming);
  1888. return is_roaming;
  1889. }
  1890. QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
  1891. ba_window_size, uint32_t start_seq)
  1892. {
  1893. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  1894. struct dp_soc *soc = peer->vdev->pdev->soc;
  1895. struct hal_reo_cmd_params params;
  1896. qdf_mem_zero(&params, sizeof(params));
  1897. params.std.need_status = 1;
  1898. params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
  1899. params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  1900. params.u.upd_queue_params.update_ba_window_size = 1;
  1901. params.u.upd_queue_params.ba_window_size = ba_window_size;
  1902. if (start_seq < IEEE80211_SEQ_MAX) {
  1903. params.u.upd_queue_params.update_ssn = 1;
  1904. params.u.upd_queue_params.ssn = start_seq;
  1905. } else {
  1906. dp_set_ssn_valid_flag(&params, 0);
  1907. }
  1908. if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
  1909. dp_rx_tid_update_cb, rx_tid)) {
  1910. dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
  1911. DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
  1912. }
  1913. rx_tid->ba_win_size = ba_window_size;
  1914. if (dp_get_peer_vdev_roaming_in_progress(peer))
  1915. return QDF_STATUS_E_PERM;
  1916. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
  1917. soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  1918. soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
  1919. peer->vdev->vdev_id, peer->mac_addr.raw,
  1920. rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
  1921. return QDF_STATUS_SUCCESS;
  1922. }
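/*
 * The addr_lo/addr_hi split of hw_qdesc_paddr above is repeated for every
 * REO command built in this file. A hypothetical helper (illustrative only,
 * not part of the driver) that captures the pattern:
 *
 *   static inline void dp_reo_cmd_set_qdesc_addr(struct hal_reo_cmd_params *p,
 *                                                qdf_dma_addr_t paddr)
 *   {
 *       p->std.addr_lo = paddr & 0xffffffff;
 *       p->std.addr_hi = (uint64_t)paddr >> 32;
 *   }
 */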
  1923. /*
1924. * dp_reo_desc_free() - Callback to free reo descriptor memory after
  1925. * HW cache flush
  1926. *
  1927. * @soc: DP SOC handle
  1928. * @cb_ctxt: Callback context
  1929. * @reo_status: REO command status
  1930. */
  1931. static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
  1932. union hal_reo_status *reo_status)
  1933. {
  1934. struct reo_desc_list_node *freedesc =
  1935. (struct reo_desc_list_node *)cb_ctxt;
  1936. struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
  1937. unsigned long curr_ts = qdf_get_system_timestamp();
  1938. if ((reo_status->fl_cache_status.header.status !=
  1939. HAL_REO_CMD_SUCCESS) &&
  1940. (reo_status->fl_cache_status.header.status !=
  1941. HAL_REO_CMD_DRAIN)) {
  1942. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1943. "%s: Rx tid HW desc flush failed(%d): tid %d",
  1944. __func__,
  1945. reo_status->rx_queue_status.header.status,
  1946. freedesc->rx_tid.tid);
  1947. }
  1948. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1949. "%s:%lu hw_qdesc_paddr: %pK, tid:%d", __func__,
  1950. curr_ts,
  1951. (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
  1952. qdf_mem_unmap_nbytes_single(soc->osdev,
  1953. rx_tid->hw_qdesc_paddr,
  1954. QDF_DMA_BIDIRECTIONAL,
  1955. rx_tid->hw_qdesc_alloc_size);
  1956. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1957. qdf_mem_free(freedesc);
  1958. }
  1959. #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
  1960. /* Hawkeye emulation requires bus address to be >= 0x50000000 */
  1961. static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
  1962. {
  1963. if (dma_addr < 0x50000000)
  1964. return QDF_STATUS_E_FAILURE;
  1965. else
  1966. return QDF_STATUS_SUCCESS;
  1967. }
  1968. #else
  1969. static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
  1970. {
  1971. return QDF_STATUS_SUCCESS;
  1972. }
  1973. #endif
  1974. /*
1975. * dp_rx_tid_setup_wifi3() - Setup receive TID state
  1976. * @peer: Datapath peer handle
  1977. * @tid: TID
  1978. * @ba_window_size: BlockAck window size
  1979. * @start_seq: Starting sequence number
  1980. *
  1981. * Return: QDF_STATUS code
  1982. */
  1983. QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
  1984. uint32_t ba_window_size, uint32_t start_seq)
  1985. {
  1986. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  1987. struct dp_vdev *vdev = peer->vdev;
  1988. struct dp_soc *soc = vdev->pdev->soc;
  1989. uint32_t hw_qdesc_size;
  1990. uint32_t hw_qdesc_align;
  1991. int hal_pn_type;
  1992. void *hw_qdesc_vaddr;
  1993. uint32_t alloc_tries = 0;
  1994. QDF_STATUS err = QDF_STATUS_SUCCESS;
  1995. if (peer->delete_in_progress ||
  1996. !qdf_atomic_read(&peer->is_default_route_set))
  1997. return QDF_STATUS_E_FAILURE;
  1998. rx_tid->ba_win_size = ba_window_size;
  1999. if (rx_tid->hw_qdesc_vaddr_unaligned)
  2000. return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
  2001. start_seq);
  2002. rx_tid->delba_tx_status = 0;
  2003. rx_tid->ppdu_id_2k = 0;
  2004. rx_tid->num_of_addba_req = 0;
  2005. rx_tid->num_of_delba_req = 0;
  2006. rx_tid->num_of_addba_resp = 0;
  2007. rx_tid->num_addba_rsp_failed = 0;
  2008. rx_tid->num_addba_rsp_success = 0;
  2009. rx_tid->delba_tx_success_cnt = 0;
  2010. rx_tid->delba_tx_fail_cnt = 0;
  2011. rx_tid->statuscode = 0;
  2012. /* TODO: Allocating HW queue descriptors based on max BA window size
  2013. * for all QOS TIDs so that same descriptor can be used later when
2014. * ADDBA request is received. This should be changed to allocate HW
  2015. * queue descriptors based on BA window size being negotiated (0 for
  2016. * non BA cases), and reallocate when BA window size changes and also
  2017. * send WMI message to FW to change the REO queue descriptor in Rx
  2018. * peer entry as part of dp_rx_tid_update.
  2019. */
  2020. if (tid != DP_NON_QOS_TID)
  2021. hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
  2022. HAL_RX_MAX_BA_WINDOW, tid);
  2023. else
  2024. hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
  2025. ba_window_size, tid);
  2026. hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
  2027. /* To avoid unnecessary extra allocation for alignment, try allocating
  2028. * exact size and see if we already have aligned address.
  2029. */
  2030. rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
  2031. try_desc_alloc:
  2032. rx_tid->hw_qdesc_vaddr_unaligned =
  2033. qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
  2034. if (!rx_tid->hw_qdesc_vaddr_unaligned) {
  2035. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2036. "%s: Rx tid HW desc alloc failed: tid %d",
  2037. __func__, tid);
  2038. return QDF_STATUS_E_NOMEM;
  2039. }
  2040. if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
  2041. hw_qdesc_align) {
2042. /* Address allocated above is not aligned. Allocate extra
  2043. * memory for alignment
  2044. */
  2045. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  2046. rx_tid->hw_qdesc_vaddr_unaligned =
  2047. qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
  2048. hw_qdesc_align - 1);
  2049. if (!rx_tid->hw_qdesc_vaddr_unaligned) {
  2050. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2051. "%s: Rx tid HW desc alloc failed: tid %d",
  2052. __func__, tid);
  2053. return QDF_STATUS_E_NOMEM;
  2054. }
  2055. hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
  2056. rx_tid->hw_qdesc_vaddr_unaligned,
  2057. hw_qdesc_align);
  2058. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2059. "%s: Total Size %d Aligned Addr %pK",
  2060. __func__, rx_tid->hw_qdesc_alloc_size,
  2061. hw_qdesc_vaddr);
  2062. } else {
  2063. hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
  2064. }
  2065. /* TODO: Ensure that sec_type is set before ADDBA is received.
  2066. * Currently this is set based on htt indication
  2067. * HTT_T2H_MSG_TYPE_SEC_IND from target
  2068. */
  2069. switch (peer->security[dp_sec_ucast].sec_type) {
  2070. case cdp_sec_type_tkip_nomic:
  2071. case cdp_sec_type_aes_ccmp:
  2072. case cdp_sec_type_aes_ccmp_256:
  2073. case cdp_sec_type_aes_gcmp:
  2074. case cdp_sec_type_aes_gcmp_256:
  2075. hal_pn_type = HAL_PN_WPA;
  2076. break;
  2077. case cdp_sec_type_wapi:
  2078. if (vdev->opmode == wlan_op_mode_ap)
  2079. hal_pn_type = HAL_PN_WAPI_EVEN;
  2080. else
  2081. hal_pn_type = HAL_PN_WAPI_UNEVEN;
  2082. break;
  2083. default:
  2084. hal_pn_type = HAL_PN_NONE;
  2085. break;
  2086. }
  2087. hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
  2088. hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
  2089. qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
  2090. QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
  2091. &(rx_tid->hw_qdesc_paddr));
  2092. if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
  2093. QDF_STATUS_SUCCESS) {
  2094. if (alloc_tries++ < 10) {
  2095. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  2096. rx_tid->hw_qdesc_vaddr_unaligned = NULL;
  2097. goto try_desc_alloc;
  2098. } else {
  2099. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2100. "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
  2101. __func__, tid);
  2102. err = QDF_STATUS_E_NOMEM;
  2103. goto error;
  2104. }
  2105. }
  2106. if (dp_get_peer_vdev_roaming_in_progress(peer)) {
  2107. err = QDF_STATUS_E_PERM;
  2108. goto error;
  2109. }
  2110. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  2111. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  2112. soc->ctrl_psoc,
  2113. peer->vdev->pdev->pdev_id,
  2114. peer->vdev->vdev_id,
  2115. peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
  2116. 1, ba_window_size)) {
  2117. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2118. "%s: Failed to send reo queue setup to FW - tid %d\n",
  2119. __func__, tid);
  2120. err = QDF_STATUS_E_FAILURE;
  2121. goto error;
  2122. }
  2123. }
  2124. return 0;
  2125. error:
  2126. if (rx_tid->hw_qdesc_vaddr_unaligned) {
  2127. if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
  2128. QDF_STATUS_SUCCESS)
  2129. qdf_mem_unmap_nbytes_single(
  2130. soc->osdev,
  2131. rx_tid->hw_qdesc_paddr,
  2132. QDF_DMA_BIDIRECTIONAL,
  2133. rx_tid->hw_qdesc_alloc_size);
  2134. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  2135. rx_tid->hw_qdesc_vaddr_unaligned = NULL;
  2136. }
  2137. return err;
  2138. }
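/*
 * Worked example of the alignment fallback above (illustrative numbers):
 * if hw_qdesc_align were 128 and qdf_mem_malloc() returned an address ending
 * in 0x1040, the first allocation fails the modulo check, so size plus
 * (128 - 1) bytes are reallocated and qdf_align() rounds the start up to the
 * next 0x1080 boundary, which still leaves hw_qdesc_alloc_size usable bytes
 * inside the new buffer.
 */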
  2139. #ifdef REO_DESC_DEFER_FREE
  2140. /*
2141. * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add the
2142. * desc back to the freelist and defer the deletion
  2143. *
  2144. * @soc: DP SOC handle
  2145. * @desc: Base descriptor to be freed
  2146. * @reo_status: REO command status
  2147. */
  2148. static void dp_reo_desc_clean_up(struct dp_soc *soc,
  2149. struct reo_desc_list_node *desc,
  2150. union hal_reo_status *reo_status)
  2151. {
  2152. desc->free_ts = qdf_get_system_timestamp();
  2153. DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
  2154. qdf_list_insert_back(&soc->reo_desc_freelist,
  2155. (qdf_list_node_t *)desc);
  2156. }
  2157. /*
2158. * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to the
2159. * cmd ring to avoid a REO hang
  2160. *
  2161. * @list_size: REO desc list size to be cleaned
  2162. */
  2163. static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
  2164. {
  2165. unsigned long curr_ts = qdf_get_system_timestamp();
  2166. if ((*list_size) > REO_DESC_FREELIST_SIZE) {
  2167. dp_err_log("%lu:freedesc number %d in freelist",
  2168. curr_ts, *list_size);
  2169. /* limit the batch queue size */
  2170. *list_size = REO_DESC_FREELIST_SIZE;
  2171. }
  2172. }
  2173. #else
  2174. /*
2175. * dp_reo_desc_clean_up() - If sending the cmd to REO to flush the
2176. * cache fails, free the base REO desc anyway
  2177. *
  2178. * @soc: DP SOC handle
  2179. * @desc: Base descriptor to be freed
  2180. * @reo_status: REO command status
  2181. */
  2182. static void dp_reo_desc_clean_up(struct dp_soc *soc,
  2183. struct reo_desc_list_node *desc,
  2184. union hal_reo_status *reo_status)
  2185. {
  2186. if (reo_status) {
  2187. qdf_mem_zero(reo_status, sizeof(*reo_status));
  2188. reo_status->fl_cache_status.header.status = 0;
  2189. dp_reo_desc_free(soc, (void *)desc, reo_status);
  2190. }
  2191. }
  2192. /*
2193. * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to the
2194. * cmd ring to avoid a REO hang
  2195. *
  2196. * @list_size: REO desc list size to be cleaned
  2197. */
  2198. static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
  2199. {
  2200. }
  2201. #endif
  2202. /*
  2203. * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
  2204. * cmd and re-insert desc into free list if send fails.
  2205. *
  2206. * @soc: DP SOC handle
  2207. * @desc: desc with resend update cmd flag set
  2208. * @rx_tid: Desc RX tid associated with update cmd for resetting
  2209. * valid field to 0 in h/w
  2210. *
  2211. * Return: QDF status
  2212. */
  2213. static QDF_STATUS
  2214. dp_resend_update_reo_cmd(struct dp_soc *soc,
  2215. struct reo_desc_list_node *desc,
  2216. struct dp_rx_tid *rx_tid)
  2217. {
  2218. struct hal_reo_cmd_params params;
  2219. qdf_mem_zero(&params, sizeof(params));
  2220. params.std.need_status = 1;
  2221. params.std.addr_lo =
  2222. rx_tid->hw_qdesc_paddr & 0xffffffff;
  2223. params.std.addr_hi =
  2224. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2225. params.u.upd_queue_params.update_vld = 1;
  2226. params.u.upd_queue_params.vld = 0;
  2227. desc->resend_update_reo_cmd = false;
  2228. /*
  2229. * If the cmd send fails then set resend_update_reo_cmd flag
  2230. * and insert the desc at the end of the free list to retry.
  2231. */
  2232. if (dp_reo_send_cmd(soc,
  2233. CMD_UPDATE_RX_REO_QUEUE,
  2234. &params,
  2235. dp_rx_tid_delete_cb,
  2236. (void *)desc)
  2237. != QDF_STATUS_SUCCESS) {
  2238. desc->resend_update_reo_cmd = true;
  2239. desc->free_ts = qdf_get_system_timestamp();
  2240. qdf_list_insert_back(&soc->reo_desc_freelist,
  2241. (qdf_list_node_t *)desc);
  2242. dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
  2243. DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
  2244. return QDF_STATUS_E_FAILURE;
  2245. }
  2246. return QDF_STATUS_SUCCESS;
  2247. }
  2248. /*
  2249. * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
  2250. * after deleting the entries (ie., setting valid=0)
  2251. *
  2252. * @soc: DP SOC handle
  2253. * @cb_ctxt: Callback context
  2254. * @reo_status: REO command status
  2255. */
  2256. void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
  2257. union hal_reo_status *reo_status)
  2258. {
  2259. struct reo_desc_list_node *freedesc =
  2260. (struct reo_desc_list_node *)cb_ctxt;
  2261. uint32_t list_size;
  2262. struct reo_desc_list_node *desc;
  2263. unsigned long curr_ts = qdf_get_system_timestamp();
  2264. uint32_t desc_size, tot_desc_size;
  2265. struct hal_reo_cmd_params params;
  2266. bool flush_failure = false;
  2267. if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
  2268. qdf_mem_zero(reo_status, sizeof(*reo_status));
  2269. reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
  2270. dp_reo_desc_free(soc, (void *)freedesc, reo_status);
  2271. DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
  2272. return;
  2273. } else if (reo_status->rx_queue_status.header.status !=
  2274. HAL_REO_CMD_SUCCESS) {
  2275. /* Should not happen normally. Just print error for now */
  2276. dp_info_rl("%s: Rx tid HW desc deletion failed(%d): tid %d",
  2277. __func__,
  2278. reo_status->rx_queue_status.header.status,
  2279. freedesc->rx_tid.tid);
  2280. }
  2281. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  2282. "%s: rx_tid: %d status: %d", __func__,
  2283. freedesc->rx_tid.tid,
  2284. reo_status->rx_queue_status.header.status);
  2285. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  2286. freedesc->free_ts = curr_ts;
  2287. qdf_list_insert_back_size(&soc->reo_desc_freelist,
  2288. (qdf_list_node_t *)freedesc, &list_size);
2289. /* The MCL path adds the desc back to reo_desc_freelist when a REO FLUSH
2290. * fails. That may make the number of REO queues pending in the free
2291. * list grow even larger than the REO_CMD_RING max size, lead to a REO CMD
2292. * flood and leave the REO HW in an unexpected condition. So the number
2293. * of REO cmds issued in a batch operation needs to be limited.
  2294. */
  2295. dp_reo_limit_clean_batch_sz(&list_size);
  2296. while ((qdf_list_peek_front(&soc->reo_desc_freelist,
  2297. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
  2298. ((list_size >= REO_DESC_FREELIST_SIZE) ||
  2299. (curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
  2300. (desc->resend_update_reo_cmd && list_size))) {
  2301. struct dp_rx_tid *rx_tid;
  2302. qdf_list_remove_front(&soc->reo_desc_freelist,
  2303. (qdf_list_node_t **)&desc);
  2304. list_size--;
  2305. rx_tid = &desc->rx_tid;
  2306. /* First process descs with resend_update_reo_cmd set */
  2307. if (desc->resend_update_reo_cmd) {
  2308. if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
  2309. QDF_STATUS_SUCCESS)
  2310. break;
  2311. else
  2312. continue;
  2313. }
  2314. /* Flush and invalidate REO descriptor from HW cache: Base and
  2315. * extension descriptors should be flushed separately */
  2316. if (desc->pending_ext_desc_size)
  2317. tot_desc_size = desc->pending_ext_desc_size;
  2318. else
  2319. tot_desc_size = rx_tid->hw_qdesc_alloc_size;
  2320. /* Get base descriptor size by passing non-qos TID */
  2321. desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
  2322. DP_NON_QOS_TID);
  2323. /* Flush reo extension descriptors */
  2324. while ((tot_desc_size -= desc_size) > 0) {
  2325. qdf_mem_zero(&params, sizeof(params));
  2326. params.std.addr_lo =
  2327. ((uint64_t)(rx_tid->hw_qdesc_paddr) +
  2328. tot_desc_size) & 0xffffffff;
  2329. params.std.addr_hi =
  2330. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2331. if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
  2332. CMD_FLUSH_CACHE,
  2333. &params,
  2334. NULL,
  2335. NULL)) {
  2336. dp_info_rl("fail to send CMD_CACHE_FLUSH:"
  2337. "tid %d desc %pK", rx_tid->tid,
  2338. (void *)(rx_tid->hw_qdesc_paddr));
  2339. desc->pending_ext_desc_size = tot_desc_size +
  2340. desc_size;
  2341. dp_reo_desc_clean_up(soc, desc, reo_status);
  2342. flush_failure = true;
  2343. break;
  2344. }
  2345. }
  2346. if (flush_failure)
  2347. break;
  2348. else
  2349. desc->pending_ext_desc_size = desc_size;
  2350. /* Flush base descriptor */
  2351. qdf_mem_zero(&params, sizeof(params));
  2352. params.std.need_status = 1;
  2353. params.std.addr_lo =
  2354. (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
  2355. params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2356. if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
  2357. CMD_FLUSH_CACHE,
  2358. &params,
  2359. dp_reo_desc_free,
  2360. (void *)desc)) {
  2361. union hal_reo_status reo_status;
  2362. /*
2363. * If dp_reo_send_cmd returns failure, the related TID queue desc
2364. * should be unmapped, and the local reo_desc, together with the
2365. * TID queue desc, also needs to be freed accordingly.
2366. *
2367. * Here the desc_free function is invoked directly to do the cleanup.
2368. *
2369. * In the MCL path the desc is instead added back to the free
2370. * desc list and its deletion is deferred.
  2371. */
  2372. dp_info_rl("%s: fail to send REO cmd to flush cache: tid %d",
  2373. __func__, rx_tid->tid);
  2374. dp_reo_desc_clean_up(soc, desc, &reo_status);
  2375. DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
  2376. break;
  2377. }
  2378. }
  2379. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  2380. }
  2381. /*
2382. * dp_rx_tid_delete_wifi3() - Delete receive TID queue
  2383. * @peer: Datapath peer handle
  2384. * @tid: TID
  2385. *
  2386. * Return: 0 on success, error code on failure
  2387. */
  2388. static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
  2389. {
  2390. struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
  2391. struct dp_soc *soc = peer->vdev->pdev->soc;
  2392. struct hal_reo_cmd_params params;
  2393. struct reo_desc_list_node *freedesc =
  2394. qdf_mem_malloc(sizeof(*freedesc));
  2395. if (!freedesc) {
  2396. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2397. "%s: malloc failed for freedesc: tid %d",
  2398. __func__, tid);
  2399. return -ENOMEM;
  2400. }
  2401. freedesc->rx_tid = *rx_tid;
  2402. freedesc->resend_update_reo_cmd = false;
  2403. qdf_mem_zero(&params, sizeof(params));
  2404. params.std.need_status = 1;
  2405. params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
  2406. params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2407. params.u.upd_queue_params.update_vld = 1;
  2408. params.u.upd_queue_params.vld = 0;
  2409. if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
  2410. dp_rx_tid_delete_cb, (void *)freedesc)
  2411. != QDF_STATUS_SUCCESS) {
2412. /* Defer the cleanup to the callback context */
  2413. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  2414. freedesc->free_ts = qdf_get_system_timestamp();
  2415. freedesc->resend_update_reo_cmd = true;
  2416. qdf_list_insert_front(&soc->reo_desc_freelist,
  2417. (qdf_list_node_t *)freedesc);
  2418. DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
  2419. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  2420. dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
  2421. }
  2422. rx_tid->hw_qdesc_vaddr_unaligned = NULL;
  2423. rx_tid->hw_qdesc_alloc_size = 0;
  2424. rx_tid->hw_qdesc_paddr = 0;
  2425. return 0;
  2426. }
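/*
 * Summary of the teardown flow as implemented in this file:
 * dp_rx_tid_delete_wifi3() sends CMD_UPDATE_RX_REO_QUEUE with vld = 0 and
 * hands the descriptor to dp_rx_tid_delete_cb(), which parks it on
 * soc->reo_desc_freelist, issues CMD_FLUSH_CACHE for the extension and base
 * descriptors, and finally lets dp_reo_desc_free() unmap and free the
 * queue memory once the flush status arrives.
 */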
  2427. #ifdef DP_LFR
  2428. static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
  2429. {
  2430. int tid;
  2431. for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
  2432. dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
  2433. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  2434. "Setting up TID %d for peer %pK peer->local_id %d",
  2435. tid, peer, peer->local_id);
  2436. }
  2437. }
  2438. #else
  2439. static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
  2440. #endif
  2441. /*
2442. * dp_peer_tx_init() - Initialize transmit TID state
  2443. * @pdev: Datapath pdev
  2444. * @peer: Datapath peer
  2445. *
  2446. */
  2447. void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
  2448. {
  2449. dp_peer_tid_queue_init(peer);
  2450. dp_peer_update_80211_hdr(peer->vdev, peer);
  2451. }
  2452. /*
2453. * dp_peer_tx_cleanup() - Deinitialize transmit TID state
  2454. * @vdev: Datapath vdev
  2455. * @peer: Datapath peer
  2456. *
  2457. */
  2458. static inline void
  2459. dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
  2460. {
  2461. dp_peer_tid_queue_cleanup(peer);
  2462. }
  2463. /*
2464. * dp_peer_rx_init() - Initialize receive TID state
  2465. * @pdev: Datapath pdev
  2466. * @peer: Datapath peer
  2467. *
  2468. */
  2469. void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
  2470. {
  2471. int tid;
  2472. struct dp_rx_tid *rx_tid;
  2473. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  2474. rx_tid = &peer->rx_tid[tid];
  2475. rx_tid->array = &rx_tid->base;
  2476. rx_tid->base.head = rx_tid->base.tail = NULL;
  2477. rx_tid->tid = tid;
  2478. rx_tid->defrag_timeout_ms = 0;
  2479. rx_tid->ba_win_size = 0;
  2480. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  2481. rx_tid->defrag_waitlist_elem.tqe_next = NULL;
  2482. rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
  2483. }
  2484. peer->active_ba_session_cnt = 0;
  2485. peer->hw_buffer_size = 0;
  2486. peer->kill_256_sessions = 0;
  2487. /* Setup default (non-qos) rx tid queue */
  2488. dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
  2489. /* Setup rx tid queue for TID 0.
2490. * Other queues will be set up on receiving the first packet, which would
2491. * otherwise cause a NULL REO queue error
  2492. */
  2493. dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
  2494. /*
  2495. * Setup the rest of TID's to handle LFR
  2496. */
  2497. dp_peer_setup_remaining_tids(peer);
  2498. /*
  2499. * Set security defaults: no PN check, no security. The target may
  2500. * send a HTT SEC_IND message to overwrite these defaults.
  2501. */
  2502. peer->security[dp_sec_ucast].sec_type =
  2503. peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
  2504. }
  2505. /*
2506. * dp_peer_rx_cleanup() - Cleanup receive TID state
  2507. * @vdev: Datapath vdev
  2508. * @peer: Datapath peer
  2509. *
  2510. */
  2511. void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
  2512. {
  2513. int tid;
  2514. uint32_t tid_delete_mask = 0;
  2515. dp_info("Remove tids for peer: %pK", peer);
  2516. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  2517. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  2518. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2519. if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
  2520. /* Cleanup defrag related resource */
  2521. dp_rx_defrag_waitlist_remove(peer, tid);
  2522. dp_rx_reorder_flush_frag(peer, tid);
  2523. }
  2524. if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
  2525. dp_rx_tid_delete_wifi3(peer, tid);
  2526. tid_delete_mask |= (1 << tid);
  2527. }
  2528. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2529. }
  2530. #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
  2531. if (soc->ol_ops->peer_rx_reorder_queue_remove) {
  2532. soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
  2533. peer->vdev->pdev->pdev_id,
  2534. peer->vdev->vdev_id, peer->mac_addr.raw,
  2535. tid_delete_mask);
  2536. }
  2537. #endif
  2538. for (tid = 0; tid < DP_MAX_TIDS; tid++)
  2539. qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
  2540. }
  2541. #ifdef FEATURE_PERPKT_INFO
  2542. /*
2543. * dp_peer_ppdu_delayed_ba_init() - Initialize the delayed BA ppdu stats in peer
  2544. * @peer: Datapath peer
  2545. *
  2546. * return: void
  2547. */
  2548. void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
  2549. {
  2550. qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
  2551. sizeof(struct cdp_delayed_tx_completion_ppdu_user));
  2552. peer->last_delayed_ba = false;
  2553. peer->last_delayed_ba_ppduid = 0;
  2554. }
  2555. #else
  2556. /*
2557. * dp_peer_ppdu_delayed_ba_init() - Initialize the delayed BA ppdu stats in peer
  2558. * @peer: Datapath peer
  2559. *
  2560. * return: void
  2561. */
  2562. void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
  2563. {
  2564. }
  2565. #endif
  2566. /*
2567. * dp_peer_cleanup() - Cleanup peer information
  2568. * @vdev: Datapath vdev
  2569. * @peer: Datapath peer
  2570. *
  2571. */
  2572. void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
  2573. {
  2574. enum wlan_op_mode vdev_opmode;
  2575. uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
  2576. struct dp_pdev *pdev = vdev->pdev;
  2577. struct dp_soc *soc = pdev->soc;
  2578. dp_peer_tx_cleanup(vdev, peer);
  2579. /* cleanup the Rx reorder queues for this peer */
  2580. dp_peer_rx_cleanup(vdev, peer);
  2581. /* save vdev related member in case vdev freed */
  2582. vdev_opmode = vdev->opmode;
  2583. qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
  2584. QDF_MAC_ADDR_SIZE);
  2585. if (soc->cdp_soc.ol_ops->peer_unref_delete)
  2586. soc->cdp_soc.ol_ops->peer_unref_delete(
  2587. soc->ctrl_psoc,
  2588. vdev->pdev->pdev_id,
  2589. peer->mac_addr.raw, vdev_mac_addr,
  2590. vdev_opmode);
  2591. }
2592. /* dp_teardown_256_ba_sessions() - Tear down sessions using 256
  2593. * window size when a request with
  2594. * 64 window size is received.
  2595. * This is done as a WAR since HW can
  2596. * have only one setting per peer (64 or 256).
  2597. * For HKv2, we use per tid buffersize setting
  2598. * for 0 to per_tid_basize_max_tid. For tid
  2599. * more than per_tid_basize_max_tid we use HKv1
  2600. * method.
  2601. * @peer: Datapath peer
  2602. *
  2603. * Return: void
  2604. */
  2605. static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
  2606. {
  2607. uint8_t delba_rcode = 0;
  2608. int tid;
  2609. struct dp_rx_tid *rx_tid = NULL;
  2610. tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
  2611. for (; tid < DP_MAX_TIDS; tid++) {
  2612. rx_tid = &peer->rx_tid[tid];
  2613. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2614. if (rx_tid->ba_win_size <= 64) {
  2615. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2616. continue;
  2617. } else {
  2618. if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
  2619. rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
  2620. /* send delba */
  2621. if (!rx_tid->delba_tx_status) {
  2622. rx_tid->delba_tx_retry++;
  2623. rx_tid->delba_tx_status = 1;
  2624. rx_tid->delba_rcode =
  2625. IEEE80211_REASON_QOS_SETUP_REQUIRED;
  2626. delba_rcode = rx_tid->delba_rcode;
  2627. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2628. if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
  2629. peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
  2630. peer->vdev->pdev->soc->ctrl_psoc,
  2631. peer->vdev->vdev_id,
  2632. peer->mac_addr.raw,
  2633. tid, delba_rcode);
  2634. } else {
  2635. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2636. }
  2637. } else {
  2638. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2639. }
  2640. }
  2641. }
  2642. }
  2643. /*
2644. * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
  2645. *
  2646. * @soc: Datapath soc handle
  2647. * @peer_mac: Datapath peer mac address
2648. * @vdev_id: id of datapath vdev
  2649. * @tid: TID number
  2650. * @status: tx completion status
  2651. * Return: 0 on success, error code on failure
  2652. */
int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
                                      uint8_t *peer_mac,
                                      uint16_t vdev_id,
                                      uint8_t tid, int status)
{
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
                                                      peer_mac, 0, vdev_id);
        struct dp_rx_tid *rx_tid = NULL;

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                goto fail;
        }
        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);
        if (status) {
                rx_tid->num_addba_rsp_failed++;
                dp_rx_tid_update_wifi3(peer, tid, 1,
                                       IEEE80211_SEQ_MAX);
                rx_tid->ba_status = DP_RX_BA_INACTIVE;
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                dp_err("RxTid- %d addba rsp tx completion failed", tid);

                goto success;
        }

        rx_tid->num_addba_rsp_success++;
        if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
                          __func__, tid);
                goto fail;
        }

        if (!qdf_atomic_read(&peer->is_default_route_set)) {
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: default route is not set for peer: %pM",
                          __func__, peer->mac_addr.raw);
                goto fail;
        }

        if (dp_rx_tid_update_wifi3(peer, tid,
                                   rx_tid->ba_win_size,
                                   rx_tid->startseqnum)) {
                dp_err("%s: failed update REO SSN", __func__);
        }

        dp_info("%s: tid %u window_size %u start_seq_num %u",
                __func__, tid, rx_tid->ba_win_size,
                rx_tid->startseqnum);

        /* First Session */
        if (peer->active_ba_session_cnt == 0) {
                if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
                        peer->hw_buffer_size = 256;
                else
                        peer->hw_buffer_size = 64;
        }

        rx_tid->ba_status = DP_RX_BA_ACTIVE;

        peer->active_ba_session_cnt++;

        qdf_spin_unlock_bh(&rx_tid->tid_lock);

        /* Kill any session having 256 buffer size
         * when 64 buffer size request is received.
         * Also, latch on to 64 as new buffer size.
         */
        if (peer->kill_256_sessions) {
                dp_teardown_256_ba_sessions(peer);
                peer->kill_256_sessions = 0;
        }

success:
        dp_peer_unref_delete(peer);
        return QDF_STATUS_SUCCESS;

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return QDF_STATUS_E_FAILURE;
}
/*
 * dp_addba_responsesetup_wifi3() - Return ADDBA response parameters for a TID
 *
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output status code
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
                             uint16_t vdev_id, uint8_t tid,
                             uint8_t *dialogtoken, uint16_t *statuscode,
                             uint16_t *buffersize, uint16_t *batimeout)
{
        struct dp_rx_tid *rx_tid = NULL;
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
                                                      peer_mac, 0, vdev_id);

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }
        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);
        rx_tid->num_of_addba_resp++;
        /* setup ADDBA response parameters */
        *dialogtoken = rx_tid->dialogtoken;
        *statuscode = rx_tid->statuscode;
        *buffersize = rx_tid->ba_win_size;
        *batimeout = 0;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
/* dp_check_ba_buffersize() - Check the buffer size in the ADDBA request
 *                            and latch onto it based on the size used by
 *                            the first active session.
 * @peer: Datapath peer
 * @tid: Tid
 * @buffersize: Block ack window size
 *
 * Return: void
 */
static void dp_check_ba_buffersize(struct dp_peer *peer,
                                   uint16_t tid,
                                   uint16_t buffersize)
{
        struct dp_rx_tid *rx_tid = NULL;

        rx_tid = &peer->rx_tid[tid];
        if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
            tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
                rx_tid->ba_win_size = buffersize;
                return;
        } else {
                if (peer->active_ba_session_cnt == 0) {
                        rx_tid->ba_win_size = buffersize;
                } else {
                        if (peer->hw_buffer_size == 64) {
                                if (buffersize <= 64)
                                        rx_tid->ba_win_size = buffersize;
                                else
                                        rx_tid->ba_win_size = peer->hw_buffer_size;
                        } else if (peer->hw_buffer_size == 256) {
                                if (buffersize > 64) {
                                        rx_tid->ba_win_size = buffersize;
                                } else {
                                        rx_tid->ba_win_size = buffersize;
                                        peer->hw_buffer_size = 64;
                                        peer->kill_256_sessions = 1;
                                }
                        }
                }
        }
}
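/*
 * Illustrative walk-through of the latch-on behaviour above (sketch only,
 * the window values are hypothetical):
 *
 *   1st ADDBA req, buffersize = 256 -> first session: hw_buffer_size is
 *       latched to 256 in dp_addba_resp_tx_completion_wifi3().
 *   2nd ADDBA req, buffersize = 64  -> the 64-entry window is kept, the
 *       peer is re-latched to hw_buffer_size = 64 and kill_256_sessions is
 *       set, so the existing 256-entry sessions are torn down through
 *       dp_teardown_256_ba_sessions().
 *   3rd ADDBA req, buffersize = 256 -> clamped down to the latched
 *       hw_buffer_size of 64.
 *
 * TIDs below per_tid_basize_max_tid bypass this latch and keep their
 * per-TID window.
 */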
#define DP_RX_BA_SESSION_DISABLE  1
/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 *
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
                                  uint8_t *peer_mac,
                                  uint16_t vdev_id,
                                  uint8_t dialogtoken,
                                  uint16_t tid, uint16_t batimeout,
                                  uint16_t buffersize,
                                  uint16_t startseqnum)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        struct dp_rx_tid *rx_tid = NULL;
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
                                                      peer_mac, 0, vdev_id);

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }
        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);
        rx_tid->num_of_addba_req++;
        if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
             rx_tid->hw_qdesc_vaddr_unaligned)) {
                dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
                rx_tid->ba_status = DP_RX_BA_INACTIVE;
                peer->active_ba_session_cnt--;
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Rx Tid- %d hw qdesc is already setup",
                          __func__, tid);
        }

        if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }

        if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "%s disable BA session",
                          __func__);

                buffersize = 1;
        } else if (rx_tid->rx_ba_win_size_override) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "%s override BA win to %d", __func__,
                          rx_tid->rx_ba_win_size_override);

                buffersize = rx_tid->rx_ba_win_size_override;
        } else {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                          "%s restore BA win %d based on addba req",
                          __func__, buffersize);
        }

        dp_check_ba_buffersize(peer, tid, buffersize);

        if (dp_rx_tid_setup_wifi3(peer, tid,
                                  rx_tid->ba_win_size, startseqnum)) {
                rx_tid->ba_status = DP_RX_BA_INACTIVE;
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }
        rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

        rx_tid->dialogtoken = dialogtoken;
        rx_tid->startseqnum = startseqnum;

        if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
                rx_tid->statuscode = rx_tid->userstatuscode;
        else
                rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

        if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
                rx_tid->statuscode = IEEE80211_STATUS_REFUSED;

        qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
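/*
 * Window-size precedence applied above (descriptive note, not code):
 *
 *   1. rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE -> window forced
 *      to 1 and the ADDBA response status is set to IEEE80211_STATUS_REFUSED.
 *   2. rx_ba_win_size_override set to any other non-zero value -> that value
 *      replaces the window requested by the peer.
 *   3. otherwise the window requested in the ADDBA frame is used, subject to
 *      dp_check_ba_buffersize() latching.
 */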
/*
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 *
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @statuscode: response status code to be set
 */
QDF_STATUS
dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
                      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
{
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
                                                      peer_mac, 0, vdev_id);
        struct dp_rx_tid *rx_tid;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }

        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);
        rx_tid->userstatuscode = statuscode;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
/*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
                           uint16_t vdev_id, int tid, uint16_t reasoncode)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        struct dp_rx_tid *rx_tid;
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
                                                      peer_mac, 0, vdev_id);

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }
        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);
        if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
            rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }
        /* TODO: See if we can delete the existing REO queue descriptor and
         * replace with a new one without queue extension descriptor to save
         * memory
         */
        rx_tid->delba_rcode = reasoncode;
        rx_tid->num_of_delba_req++;
        dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);

        rx_tid->ba_status = DP_RX_BA_INACTIVE;
        peer->active_ba_session_cnt--;
        qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
/*
 * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion, retrying the
 *                                  DELBA if needed
 *
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
                                 uint16_t vdev_id,
                                 uint8_t tid, int status)
{
        QDF_STATUS ret = QDF_STATUS_SUCCESS;
        struct dp_rx_tid *rx_tid = NULL;
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
                                                      peer_mac, 0, vdev_id);

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!", __func__);
                ret = QDF_STATUS_E_FAILURE;
                goto end;
        }
        rx_tid = &peer->rx_tid[tid];
        qdf_spin_lock_bh(&rx_tid->tid_lock);
        if (status) {
                rx_tid->delba_tx_fail_cnt++;
                if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
                        rx_tid->delba_tx_retry = 0;
                        rx_tid->delba_tx_status = 0;
                        qdf_spin_unlock_bh(&rx_tid->tid_lock);
                } else {
                        rx_tid->delba_tx_retry++;
                        rx_tid->delba_tx_status = 1;
                        qdf_spin_unlock_bh(&rx_tid->tid_lock);
                        if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
                                peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
                                        peer->vdev->pdev->soc->ctrl_psoc,
                                        peer->vdev->vdev_id,
                                        peer->mac_addr.raw, tid,
                                        rx_tid->delba_rcode);
                }
                goto end;
        } else {
                rx_tid->delba_tx_success_cnt++;
                rx_tid->delba_tx_retry = 0;
                rx_tid->delba_tx_status = 0;
        }
        if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
                dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
                rx_tid->ba_status = DP_RX_BA_INACTIVE;
                peer->active_ba_session_cnt--;
        }
        if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
                dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
                rx_tid->ba_status = DP_RX_BA_INACTIVE;
        }
        qdf_spin_unlock_bh(&rx_tid->tid_lock);

end:
        if (peer)
                dp_peer_unref_delete(peer);

        return ret;
}
/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
                      uint8_t *peer_mac, enum cdp_sec_type sec_type,
                      uint32_t *rx_pn)
{
        struct dp_pdev *pdev;
        int i;
        uint8_t pn_size;
        struct hal_reo_cmd_params params;
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
                                                      peer_mac, 0, vdev_id);
        struct dp_vdev *vdev =
                dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
                                                   vdev_id);

        if (!vdev || !peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }

        pdev = vdev->pdev;
        qdf_mem_zero(&params, sizeof(params));
        params.std.need_status = 1;
        params.u.upd_queue_params.update_pn_valid = 1;
        params.u.upd_queue_params.update_pn_size = 1;
        params.u.upd_queue_params.update_pn = 1;
        params.u.upd_queue_params.update_pn_check_needed = 1;
        params.u.upd_queue_params.update_svld = 1;
        params.u.upd_queue_params.svld = 0;

        switch (sec_type) {
        case cdp_sec_type_tkip_nomic:
        case cdp_sec_type_aes_ccmp:
        case cdp_sec_type_aes_ccmp_256:
        case cdp_sec_type_aes_gcmp:
        case cdp_sec_type_aes_gcmp_256:
                params.u.upd_queue_params.pn_check_needed = 1;
                params.u.upd_queue_params.pn_size = 48;
                pn_size = 48;
                break;
        case cdp_sec_type_wapi:
                params.u.upd_queue_params.pn_check_needed = 1;
                params.u.upd_queue_params.pn_size = 128;
                pn_size = 128;
                if (vdev->opmode == wlan_op_mode_ap) {
                        params.u.upd_queue_params.pn_even = 1;
                        params.u.upd_queue_params.update_pn_even = 1;
                } else {
                        params.u.upd_queue_params.pn_uneven = 1;
                        params.u.upd_queue_params.update_pn_uneven = 1;
                }
                break;
        default:
                params.u.upd_queue_params.pn_check_needed = 0;
                pn_size = 0;
                break;
        }

        for (i = 0; i < DP_MAX_TIDS; i++) {
                struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

                qdf_spin_lock_bh(&rx_tid->tid_lock);
                if (rx_tid->hw_qdesc_vaddr_unaligned) {
                        params.std.addr_lo =
                                rx_tid->hw_qdesc_paddr & 0xffffffff;
                        params.std.addr_hi =
                                (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

                        if (pn_size) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                          QDF_TRACE_LEVEL_INFO_HIGH,
                                          "%s PN set for TID:%d pn:%x:%x:%x:%x",
                                          __func__, i, rx_pn[3], rx_pn[2],
                                          rx_pn[1], rx_pn[0]);
                                params.u.upd_queue_params.update_pn_valid = 1;
                                params.u.upd_queue_params.pn_31_0 = rx_pn[0];
                                params.u.upd_queue_params.pn_63_32 = rx_pn[1];
                                params.u.upd_queue_params.pn_95_64 = rx_pn[2];
                                params.u.upd_queue_params.pn_127_96 = rx_pn[3];
                        }
                        rx_tid->pn_size = pn_size;
                        if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
                                            CMD_UPDATE_RX_REO_QUEUE,
                                            &params, dp_rx_tid_update_cb,
                                            rx_tid)) {
                                dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
                                           "tid %d desc %pK", rx_tid->tid,
                                           (void *)(rx_tid->hw_qdesc_paddr));
                                DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
                                             rx.err.reo_cmd_send_fail, 1);
                        }
                } else {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
                                  "PN Check not setup for TID :%d ", i);
                }
                qdf_spin_unlock_bh(&rx_tid->tid_lock);
        }

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
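/*
 * Summary of the PN configuration programmed above (descriptive note):
 *
 *   TKIP (no MIC), CCMP, CCMP-256, GCMP, GCMP-256 -> 48-bit PN check
 *   WAPI                                          -> 128-bit PN check, even PN
 *                                                    on AP vdevs, uneven PN
 *                                                    otherwise
 *   any other cipher                              -> PN check disabled
 *
 * The REO queue descriptor of every TID that has been set up receives a
 * CMD_UPDATE_RX_REO_QUEUE command carrying these parameters.
 */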
/**
 * dp_set_key_sec_type_wifi3() - set security mode of key
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @sec_type: security type
 * @is_unicast: key type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
                          uint8_t *peer_mac, enum cdp_sec_type sec_type,
                          bool is_unicast)
{
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
                                                      peer_mac, 0, vdev_id);
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        int sec_index;

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Peer is NULL!\n", __func__);
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
                  "key sec spec for peer %pK %pM: %s key of type %d",
                  peer,
                  peer->mac_addr.raw,
                  is_unicast ? "ucast" : "mcast",
                  sec_type);

        sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
        peer->security[sec_index].sec_type = sec_type;

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
void
dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
                      enum cdp_sec_type sec_type, int is_unicast,
                      u_int32_t *michael_key,
                      u_int32_t *rx_pn)
{
        struct dp_peer *peer;
        int sec_index;

        peer = dp_peer_find_by_id(soc, peer_id);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "Couldn't find peer from ID %d - skipping security inits",
                          peer_id);
                return;
        }
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
                  "sec spec for peer %pK %pM: %s key of type %d",
                  peer,
                  peer->mac_addr.raw,
                  is_unicast ? "ucast" : "mcast",
                  sec_type);
        sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
        peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
        /* michael key only valid for TKIP, but for simplicity,
         * copy it anyway
         */
        qdf_mem_copy(
                &peer->security[sec_index].michael_key[0],
                michael_key,
                sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
        OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
                     sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif

#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
        if (sec_type != cdp_sec_type_wapi) {
                qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
        } else {
                for (i = 0; i < DP_MAX_TIDS; i++) {
                        /*
                         * Setting PN valid bit for WAPI sec_type,
                         * since WAPI PN has to be started with predefined value
                         */
                        peer->tids_last_pn_valid[i] = 1;
                        qdf_mem_copy(
                                (u_int8_t *)&peer->tids_last_pn[i],
                                (u_int8_t *)rx_pn, sizeof(union htt_rx_pn_t));
                        peer->tids_last_pn[i].pn128[1] =
                                qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
                        peer->tids_last_pn[i].pn128[0] =
                                qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
                }
        }
#endif
        /* TODO: Update HW TID queue with PN check parameters (pn type for
         * all security types and last pn for WAPI) once REO command API
         * is available
         */

        dp_peer_unref_delete(peer);
}
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_peer_ext_stats_ctx_alloc() - Allocate peer extended
 *                                 stats context
 * @soc: DP SoC context
 * @peer: DP peer context
 *
 * Allocate the peer extended stats context
 *
 * Return: QDF_STATUS_SUCCESS if allocation is
 *         successful
 */
QDF_STATUS dp_peer_ext_stats_ctx_alloc(struct dp_soc *soc,
                                       struct dp_peer *peer)
{
        uint8_t tid, ctx_id;

        if (!soc || !peer) {
                dp_warn("Null soc%pK or peer%pK", soc, peer);
                return QDF_STATUS_E_INVAL;
        }

        if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
                return QDF_STATUS_SUCCESS;

        /*
         * Allocate memory for peer extended stats.
         */
        peer->pext_stats = qdf_mem_malloc(sizeof(struct cdp_peer_ext_stats));
        if (!peer->pext_stats) {
                dp_err("Peer extended stats obj alloc failed!!");
                return QDF_STATUS_E_NOMEM;
        }

        for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
                for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
                        struct cdp_delay_tx_stats *tx_delay =
                        &peer->pext_stats->delay_stats[tid][ctx_id].tx_delay;
                        struct cdp_delay_rx_stats *rx_delay =
                        &peer->pext_stats->delay_stats[tid][ctx_id].rx_delay;

                        dp_hist_init(&tx_delay->tx_swq_delay,
                                     CDP_HIST_TYPE_SW_ENQEUE_DELAY);
                        dp_hist_init(&tx_delay->hwtx_delay,
                                     CDP_HIST_TYPE_HW_COMP_DELAY);
                        dp_hist_init(&rx_delay->to_stack_delay,
                                     CDP_HIST_TYPE_REAP_STACK);
                }
        }

        return QDF_STATUS_SUCCESS;
}
/*
 * dp_peer_ext_stats_ctx_dealloc() - Dealloc the peer extended stats context
 * @soc: DP SoC context
 * @peer: DP peer context
 *
 * Free the peer extended stats context
 *
 * Return: Void
 */
void dp_peer_ext_stats_ctx_dealloc(struct dp_soc *soc, struct dp_peer *peer)
{
        if (!peer) {
                dp_warn("peer_ext dealloc failed due to NULL peer object");
                return;
        }

        if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
                return;

        if (!peer->pext_stats)
                return;

        qdf_mem_free(peer->pext_stats);
        peer->pext_stats = NULL;
}
#endif
QDF_STATUS
dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
                        uint8_t tid, uint16_t win_sz)
{
        struct dp_soc *soc = (struct dp_soc *)soc_handle;
        struct dp_peer *peer;
        struct dp_rx_tid *rx_tid;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        peer = dp_peer_find_by_id(soc, peer_id);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "Couldn't find peer from ID %d",
                          peer_id);
                return QDF_STATUS_E_FAILURE;
        }

        qdf_assert_always(tid < DP_MAX_TIDS);

        rx_tid = &peer->rx_tid[tid];

        if (rx_tid->hw_qdesc_vaddr_unaligned) {
                if (!rx_tid->delba_tx_status) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                                  "%s: PEER_ID: %d TID: %d, BA win: %d ",
                                  __func__, peer_id, tid, win_sz);

                        qdf_spin_lock_bh(&rx_tid->tid_lock);

                        rx_tid->delba_tx_status = 1;

                        rx_tid->rx_ba_win_size_override =
                                qdf_min((uint16_t)63, win_sz);

                        rx_tid->delba_rcode =
                                IEEE80211_REASON_QOS_SETUP_REQUIRED;

                        qdf_spin_unlock_bh(&rx_tid->tid_lock);

                        if (soc->cdp_soc.ol_ops->send_delba)
                                soc->cdp_soc.ol_ops->send_delba(
                                        peer->vdev->pdev->soc->ctrl_psoc,
                                        peer->vdev->vdev_id,
                                        peer->mac_addr.raw,
                                        tid,
                                        rx_tid->delba_rcode);
                }
        } else {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "BA session is not setup for TID:%d ", tid);
                status = QDF_STATUS_E_FAILURE;
        }

        dp_peer_unref_delete(peer);

        return status;
}
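/*
 * Note on the handler above: the window size carried in the DELBA
 * indication is clamped to 63 before being stored in
 * rx_ba_win_size_override, so the next ADDBA request from this peer is
 * renegotiated with a window no larger than 63 entries.
 */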
#ifdef DP_PEER_EXTENDED_API
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                            struct ol_txrx_desc_type *sta_desc)
{
        struct dp_peer *peer;
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

        if (!pdev)
                return QDF_STATUS_E_FAULT;

        peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
                                    sta_desc->peer_addr.bytes);
        if (!peer)
                return QDF_STATUS_E_FAULT;

        qdf_spin_lock_bh(&peer->peer_info_lock);
        peer->state = OL_TXRX_PEER_STATE_CONN;
        qdf_spin_unlock_bh(&peer->peer_info_lock);

        dp_rx_flush_rx_cached(peer, false);

        return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
              struct qdf_mac_addr peer_addr)
{
        struct dp_peer *peer;
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

        if (!pdev)
                return QDF_STATUS_E_FAULT;

        peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
        if (!peer || !peer->valid)
                return QDF_STATUS_E_FAULT;

        dp_clear_peer_internal(soc, peer);

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle: data path device instance
 * @vdev_handle: virtual interface instance
 * @peer_addr: peer mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL when the target peer cannot be found
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
                                    struct cdp_vdev *vdev_handle,
                                    uint8_t *peer_addr)
{
        struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
        struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
        struct dp_peer *peer;

        peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);

        if (!peer)
                return NULL;

        if (peer->vdev != vdev) {
                dp_peer_unref_delete(peer);
                return NULL;
        }

        /* ref_cnt is incremented inside dp_peer_find_hash_find().
         * Decrement it here.
         */
        dp_peer_unref_delete(peer);

        return peer;
}
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
                                enum ol_txrx_peer_state state)
{
        struct dp_peer *peer;
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

        peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "Failed to find peer for: [%pM]", peer_mac);
                return QDF_STATUS_E_FAILURE;
        }
        peer->state = state;

        dp_info("peer %pK state %d", peer, peer->state);
        /* ref_cnt is incremented inside dp_peer_find_hash_find().
         * Decrement it here.
         */
        dp_peer_unref_delete(peer);

        return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
                         uint8_t *vdev_id)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_peer *peer =
                dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);

        if (!peer)
                return QDF_STATUS_E_FAILURE;

        dp_info("peer %pK vdev %pK vdev id %d",
                peer, peer->vdev, peer->vdev->vdev_id);
        *vdev_id = peer->vdev->vdev_id;
        /* ref_cnt is incremented inside dp_peer_find_hash_find().
         * Decrement it here.
         */
        dp_peer_unref_delete(peer);

        return QDF_STATUS_SUCCESS;
}
struct cdp_vdev *
dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
                         struct qdf_mac_addr peer_addr)
{
        struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
        struct dp_peer *peer = NULL;

        if (!pdev) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
                          "PDEV not found for peer_addr: %pM",
                          peer_addr.bytes);
                return NULL;
        }

        peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
        if (!peer) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                          "Peer not found for peer_addr: %pM",
                          peer_addr.bytes);
                return NULL;
        }

        return (struct cdp_vdev *)peer->vdev;
}
/**
 * dp_get_vdev_for_peer() - Get virtual interface instance to which the peer belongs
 * @peer_handle: peer instance
 *
 * Get the virtual interface instance to which the peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
        struct dp_peer *peer = peer_handle;

        DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
        return (struct cdp_vdev *)peer->vdev;
}
/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer_handle: peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
        struct dp_peer *peer = peer_handle;
        uint8_t *mac;

        mac = peer->mac_addr.raw;
        dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
                peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return peer->mac_addr.raw;
}
int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                      uint8_t *peer_mac)
{
        enum ol_txrx_peer_state peer_state;
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
                                                      vdev_id);

        if (!peer)
                return QDF_STATUS_E_FAILURE;

        DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
        peer_state = peer->state;
        dp_peer_unref_delete(peer);

        return peer_state;
}
/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev: data path device instance
 *
 * local peer id pool alloc for physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
        int i;

        /* point the freelist to the first ID */
        pdev->local_peer_ids.freelist = 0;

        /* link each ID to the next one */
        for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
                pdev->local_peer_ids.pool[i] = i + 1;
                pdev->local_peer_ids.map[i] = NULL;
        }

        /* link the last ID to itself, to mark the end of the list */
        i = OL_TXRX_NUM_LOCAL_PEER_IDS;
        pdev->local_peer_ids.pool[i] = i;

        qdf_spinlock_create(&pdev->local_peer_ids.lock);
        DP_TRACE(INFO, "Peer pool init");
}
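/*
 * Illustrative state of the local ID pool (assuming, purely for this sketch,
 * that OL_TXRX_NUM_LOCAL_PEER_IDS were 4):
 *
 *   after init:     freelist = 0, pool = [1, 2, 3, 4, 4], map = [NULL x 4]
 *   after 1 alloc:  local_id 0 handed out, freelist = 1, map[0] = peer
 *   after freeing:  the released ID is pushed back onto the head of the
 *                   freelist by dp_local_peer_id_free()
 *
 * pool[OL_TXRX_NUM_LOCAL_PEER_IDS] pointing at itself is the end-of-list
 * marker checked in dp_local_peer_id_alloc().
 */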
/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev: data path device instance
 * @peer: new peer instance
 *
 * allocate local peer id
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
        int i;

        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        i = pdev->local_peer_ids.freelist;
        if (pdev->local_peer_ids.pool[i] == i) {
                /* the list is empty, except for the list-end marker */
                peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
        } else {
                /* take the head ID and advance the freelist */
                peer->local_id = i;
                pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
                pdev->local_peer_ids.map[i] = peer;
        }
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
        dp_info("peer %pK, local id %d", peer, peer->local_id);
}
/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev: data path device instance
 * @peer: peer instance to be removed
 *
 * remove local peer id
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
        int i = peer->local_id;

        if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
            (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
                return;
        }

        /* put this ID on the head of the freelist */
        qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
        pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
        pdev->local_peer_ids.freelist = i;
        pdev->local_peer_ids.map[i] = NULL;
        qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
                                uint8_t vdev_id, uint8_t *peer_addr)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

        if (!vdev)
                return false;

        return !!dp_find_peer_by_addr_and_vdev(
                                        dp_pdev_to_cdp_pdev(vdev->pdev),
                                        dp_vdev_to_cdp_vdev(vdev),
                                        peer_addr);
}
bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
                                      uint8_t vdev_id, uint8_t *peer_addr,
                                      uint16_t max_bssid)
{
        int i;
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_vdev *vdev;

        for (i = 0; i < max_bssid; i++) {
                vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i);
                /* Need to check vdevs other than the vdev_id */
                if (vdev_id == i || !vdev)
                        continue;
                if (dp_find_peer_by_addr_and_vdev(
                                        dp_pdev_to_cdp_pdev(vdev->pdev),
                                        dp_vdev_to_cdp_vdev(vdev),
                                        peer_addr)) {
                        dp_err("%s: Duplicate peer %pM already exist on vdev %d",
                               __func__, peer_addr, i);
                        return true;
                }
        }

        return false;
}
bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint8_t *peer_addr)
{
        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
        struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

        if (!pdev)
                return false;

        return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr);
}
#endif
/**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: count of tid stats cmd send succeeded
 */
int dp_peer_rxtid_stats(struct dp_peer *peer,
                        dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
                        void *cb_ctxt)
{
        struct dp_soc *soc = peer->vdev->pdev->soc;
        struct hal_reo_cmd_params params;
        int i;
        int stats_cmd_sent_cnt = 0;
        QDF_STATUS status;

        if (!dp_stats_cmd_cb)
                return stats_cmd_sent_cnt;

        qdf_mem_zero(&params, sizeof(params));
        for (i = 0; i < DP_MAX_TIDS; i++) {
                struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

                if (rx_tid->hw_qdesc_vaddr_unaligned) {
                        params.std.need_status = 1;
                        params.std.addr_lo =
                                rx_tid->hw_qdesc_paddr & 0xffffffff;
                        params.std.addr_hi =
                                (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

                        if (cb_ctxt) {
                                status = dp_reo_send_cmd(
                                                soc, CMD_GET_QUEUE_STATS,
                                                &params, dp_stats_cmd_cb,
                                                cb_ctxt);
                        } else {
                                status = dp_reo_send_cmd(
                                                soc, CMD_GET_QUEUE_STATS,
                                                &params, dp_stats_cmd_cb,
                                                rx_tid);
                        }

                        if (QDF_IS_STATUS_SUCCESS(status))
                                stats_cmd_sent_cnt++;

                        /* Flush REO descriptor from HW cache to update stats
                         * in descriptor memory. This is to help debugging
                         */
                        qdf_mem_zero(&params, sizeof(params));
                        params.std.need_status = 0;
                        params.std.addr_lo =
                                rx_tid->hw_qdesc_paddr & 0xffffffff;
                        params.std.addr_hi =
                                (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
                        params.u.fl_cache_params.flush_no_inval = 1;
                        dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
                                        NULL);
                }
        }

        return stats_cmd_sent_cnt;
}
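/*
 * Usage sketch for the helper above (hypothetical caller, not part of this
 * file): a debug/stats path passes a callback matching the
 * dp_rxtid_stats_cmd_cb typedef and checks how many CMD_GET_QUEUE_STATS
 * commands were actually queued, e.g.
 *
 *      sent = dp_peer_rxtid_stats(peer, my_rxtid_stats_cb, my_ctxt);
 *
 * where my_rxtid_stats_cb and my_ctxt are placeholder names. When cb_ctxt
 * is NULL, the per-TID dp_rx_tid structure is handed to the callback
 * instead of the caller context.
 */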
QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc,
                   uint8_t vdev_id,
                   uint8_t *peer_mac,
                   bool is_unicast, uint32_t *key)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;
        uint8_t sec_index = is_unicast ? 1 : 0;
        struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
                                                      peer_mac, 0, vdev_id);

        if (!peer || peer->delete_in_progress) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "peer not found ");
                status = QDF_STATUS_E_FAILURE;
                goto fail;
        }

        qdf_mem_copy(&peer->security[sec_index].michael_key[0],
                     key, IEEE80211_WEP_MICLEN);

fail:
        if (peer)
                dp_peer_unref_delete(peer);

        return status;
}
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
{
        struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);

        if (peer) {
                /*
                 * Decrement the peer ref which is taken as part of
                 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
                 */
                dp_peer_unref_delete(peer);

                return true;
        }

        return false;
}
/**
 * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
 * @soc: DP soc
 * @vdev: vdev
 *
 * Return: VDEV BSS peer
 */
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
                                           struct dp_vdev *vdev)
{
        struct dp_peer *peer;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
                if (peer->bss_peer)
                        break;
        }

        if (!peer || !qdf_atomic_inc_not_zero(&peer->ref_cnt)) {
                qdf_spin_unlock_bh(&vdev->peer_list_lock);
                return NULL;
        }

        qdf_spin_unlock_bh(&vdev->peer_list_lock);
        return peer;
}
/**
 * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
 * @soc: DP soc
 * @vdev: vdev
 *
 * Return: VDEV self peer
 */
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
                                                struct dp_vdev *vdev)
{
        struct dp_peer *peer;

        if (vdev->opmode != wlan_op_mode_sta)
                return NULL;

        qdf_spin_lock_bh(&vdev->peer_list_lock);
        TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
                if (peer->sta_self_peer)
                        break;
        }

        if (!peer || !qdf_atomic_inc_not_zero(&peer->ref_cnt)) {
                qdf_spin_unlock_bh(&vdev->peer_list_lock);
                return NULL;
        }

        qdf_spin_unlock_bh(&vdev->peer_list_lock);
        return peer;
}