/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#endif

#ifdef FEATURE_WDS
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
                                    struct dp_ast_entry *ast_entry)
{
    /* If peer map v2 is enabled we are not freeing the AST entry
     * here; it is supposed to be freed in the unmap event (after
     * we receive delete confirmation from target).
     *
     * If peer_id is invalid we did not get the peer map event
     * for the peer; free the AST entry from here only in that case.
     */
    if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
        (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
        return true;

    return false;
}
#else
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_peer *peer,
                                    struct dp_ast_entry *ast_entry)
{
    return false;
}
#endif
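
/*
 * Example: with FEATURE_WDS enabled, an entry of type WDS, WDS_HM, MEC,
 * STATIC, STA_BSS or DA makes this helper return true, i.e. the free is
 * deferred to the unmap-event path; only WDS_HM_SEC and SELF entries are
 * host-owned and freed inline by the caller.
 */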
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
                      uint8_t valid)
{
    params->u.upd_queue_params.update_svld = 1;
    params->u.upd_queue_params.svld = valid;
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
              "%s: Setting SSN valid bit to %d",
              __func__, valid);
}
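
/*
 * Usage sketch (illustrative): this helper only fills the update_svld/svld
 * fields; callers are expected to embed it in a REO queue-update command,
 * roughly as below (dp_reo_send_cmd() and CMD_UPDATE_RX_REO_QUEUE are
 * assumed from the DP REO command layer):
 *
 *     struct hal_reo_cmd_params params;
 *
 *     qdf_mem_zero(&params, sizeof(params));
 *     dp_set_ssn_valid_flag(&params, 0);
 *     dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, NULL, NULL);
 */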
static inline int dp_peer_find_mac_addr_cmp(
    union dp_align_mac_addr *mac_addr1,
    union dp_align_mac_addr *mac_addr2)
{
    /*
     * Intentionally use & rather than &&.
     * Because the operands are binary rather than generic boolean,
     * the functionality is equivalent.
     * Using && has the advantage of short-circuited evaluation,
     * but using & has the advantage of no conditional branching,
     * which is a more significant benefit.
     */
    return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
             & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}
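
/*
 * Example: like memcmp(), the return value is 0 when the two 6-byte
 * addresses are identical, and non-zero (here, 1) otherwise:
 *
 *     if (dp_peer_find_mac_addr_cmp(&a, &b) == 0)
 *         ... addresses match ...
 */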
static int dp_peer_ast_table_attach(struct dp_soc *soc)
{
    uint32_t max_ast_index;

    max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
    /* allocate ast_table for ast entry to ast_index map */
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
              "\n<=== cfg max ast idx %d ====>", max_ast_index);
    soc->ast_table = qdf_mem_malloc(max_ast_index *
                                    sizeof(struct dp_ast_entry *));
    if (!soc->ast_table) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: ast_table memory allocation failed", __func__);
        return QDF_STATUS_E_NOMEM;
    }
    return 0; /* success */
}
static int dp_peer_find_map_attach(struct dp_soc *soc)
{
    uint32_t max_peers, peer_map_size;

    max_peers = soc->max_peers;
    /* allocate the peer ID -> peer object map */
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
              "\n<=== cfg max peer id %d ====>", max_peers);
    peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
    soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
    if (!soc->peer_id_to_obj_map) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s: peer map memory allocation failed", __func__);
        return QDF_STATUS_E_NOMEM;
    }
    /*
     * The peer_id_to_obj_map doesn't really need to be initialized,
     * since elements are only used after they have been individually
     * initialized.
     * However, it is convenient for debugging to have all elements
     * that are not in use set to 0.
     */
    qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
    return 0; /* success */
}
static int dp_log2_ceil(unsigned int value)
{
    unsigned int tmp = value;
    int log2 = -1;

    while (tmp) {
        log2++;
        tmp >>= 1;
    }
    if (1 << log2 != value)
        log2++;
    return log2;
}
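
/*
 * Worked example: dp_log2_ceil(5) shifts tmp through 5, 2, 1, giving
 * log2 = 2; since (1 << 2) != 5, the result is bumped to 3, i.e.
 * ceil(log2(5)). Exact powers of two are unchanged: dp_log2_ceil(8) == 3.
 */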
static int dp_peer_find_add_id_to_obj(
    struct dp_peer *peer,
    uint16_t peer_id)
{
    int i;

    for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
        if (peer->peer_ids[i] == HTT_INVALID_PEER) {
            peer->peer_ids[i] = peer_id;
            return 0; /* success */
        }
    }
    return QDF_STATUS_E_FAILURE; /* failure */
}
#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0
#define DP_AST_HASH_LOAD_MULT 2
#define DP_AST_HASH_LOAD_SHIFT 0

static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
    int i, hash_elems, log2;

    /* allocate the peer MAC address -> peer object hash table */
    hash_elems = soc->max_peers;
    hash_elems *= DP_PEER_HASH_LOAD_MULT;
    hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
    log2 = dp_log2_ceil(hash_elems);
    hash_elems = 1 << log2;

    soc->peer_hash.mask = hash_elems - 1;
    soc->peer_hash.idx_bits = log2;
    /* allocate an array of TAILQ peer object lists */
    soc->peer_hash.bins = qdf_mem_malloc(
        hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
    if (!soc->peer_hash.bins)
        return QDF_STATUS_E_NOMEM;

    for (i = 0; i < hash_elems; i++)
        TAILQ_INIT(&soc->peer_hash.bins[i]);

    return 0;
}
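
/*
 * Sizing example (hypothetical max_peers = 1024): with
 * DP_PEER_HASH_LOAD_MULT = 2 and DP_PEER_HASH_LOAD_SHIFT = 0,
 * hash_elems = 2048, which is already a power of two, so the table ends
 * up with mask = 0x7ff and idx_bits = 11.
 */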
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
    if (soc->peer_hash.bins) {
        qdf_mem_free(soc->peer_hash.bins);
        soc->peer_hash.bins = NULL;
    }
}

static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
                                               union dp_align_mac_addr *mac_addr)
{
    unsigned index;

    index =
        mac_addr->align2.bytes_ab ^
        mac_addr->align2.bytes_cd ^
        mac_addr->align2.bytes_ef;
    index ^= index >> soc->peer_hash.idx_bits;
    index &= soc->peer_hash.mask;
    return index;
}
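
/*
 * Example (continuing the sizing example above): for a MAC whose aligned
 * halfwords are bytes_ab = 0x1122, bytes_cd = 0x3344, bytes_ef = 0x5566,
 * the XOR fold gives 0x7700; with idx_bits = 11 the high bits fold in as
 * 0x7700 ^ (0x7700 >> 11) = 0x770e, and masking with 0x7ff selects
 * bin 0x70e.
 */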
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
    unsigned index;

    index = dp_peer_find_hash_index(soc, &peer->mac_addr);
    qdf_spin_lock_bh(&soc->peer_ref_mutex);
    /*
     * It is important to add the new peer at the tail of the peer list
     * with the bin index. Together with having the hash_find function
     * search from head to tail, this ensures that if two entries with
     * the same MAC address are stored, the one added first will be
     * found first.
     */
    TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}
#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
    int i, hash_elems, log2;
    unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

    hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
                  DP_AST_HASH_LOAD_SHIFT);
    log2 = dp_log2_ceil(hash_elems);
    hash_elems = 1 << log2;

    soc->ast_hash.mask = hash_elems - 1;
    soc->ast_hash.idx_bits = log2;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
              "ast hash_elems: %d, max_ast_idx: %d",
              hash_elems, max_ast_idx);

    /* allocate an array of TAILQ ast entry lists */
    soc->ast_hash.bins = qdf_mem_malloc(
        hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
                                       dp_ast_entry)));
    if (!soc->ast_hash.bins)
        return QDF_STATUS_E_NOMEM;

    for (i = 0; i < hash_elems; i++)
        TAILQ_INIT(&soc->ast_hash.bins[i]);

    return 0;
}
/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
                                       struct dp_ast_entry *ast)
{
    txrx_ast_free_cb cb = ast->callback;
    void *cookie = ast->cookie;

    /* Call the callback to free up the cookie */
    if (cb) {
        ast->callback = NULL;
        ast->cookie = NULL;
        cb(soc->ctrl_psoc,
           dp_soc_to_cdp_soc(soc),
           cookie,
           CDP_TXRX_AST_DELETE_IN_PROGRESS);
    }
}
/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
    unsigned int index;
    struct dp_ast_entry *ast, *ast_next;

    if (!soc->ast_hash.mask)
        return;

    if (!soc->ast_hash.bins)
        return;

    qdf_spin_lock_bh(&soc->ast_lock);
    for (index = 0; index <= soc->ast_hash.mask; index++) {
        if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
            TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
                               hash_list_elem, ast_next) {
                TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
                             hash_list_elem);
                dp_peer_ast_cleanup(soc, ast);
                qdf_mem_free(ast);
            }
        }
    }
    qdf_spin_unlock_bh(&soc->ast_lock);

    qdf_mem_free(soc->ast_hash.bins);
    soc->ast_hash.bins = NULL;
}
/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
                                              union dp_align_mac_addr *mac_addr)
{
    uint32_t index;

    index =
        mac_addr->align2.bytes_ab ^
        mac_addr->align2.bytes_cd ^
        mac_addr->align2.bytes_ef;
    index ^= index >> soc->ast_hash.idx_bits;
    index &= soc->ast_hash.mask;
    return index;
}
/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function adds the AST entry into the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
                                        struct dp_ast_entry *ase)
{
    uint32_t index;

    index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
    TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}
/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function removes the AST entry from the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
void dp_peer_ast_hash_remove(struct dp_soc *soc,
                             struct dp_ast_entry *ase)
{
    unsigned index;
    struct dp_ast_entry *tmpase;
    int found = 0;

    index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
    /* Check if tail is not empty before delete */
    QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));
    TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
        if (tmpase == ase) {
            found = 1;
            break;
        }
    }

    QDF_ASSERT(found);
    TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}
/*
 * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
 * @soc: SoC handle
 * @peer: peer handle
 * @ast_mac_addr: mac address
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * ast list.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
                                           struct dp_peer *peer,
                                           uint8_t *ast_mac_addr)
{
    struct dp_ast_entry *ast_entry = NULL;
    union dp_align_mac_addr *mac_addr =
        (union dp_align_mac_addr *)ast_mac_addr;

    TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
        if (!dp_peer_find_mac_addr_cmp(mac_addr,
                                       &ast_entry->mac_addr)) {
            return ast_entry;
        }
    }

    return NULL;
}
/*
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: mac address
 * @pdev_id: pdev id
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t pdev_id)
{
    union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
    uint32_t index;
    struct dp_ast_entry *ase;

    qdf_mem_copy(&local_mac_addr_aligned.raw[0],
                 ast_mac_addr, QDF_MAC_ADDR_SIZE);
    mac_addr = &local_mac_addr_aligned;

    index = dp_peer_ast_hash_index(soc, mac_addr);
    TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
        if ((pdev_id == ase->pdev_id) &&
            !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
            return ase;
        }
    }

    return NULL;
}
/*
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: mac address
 *
 * It assumes the caller has taken the ast lock to protect access to the
 * AST hash table.
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
                                               uint8_t *ast_mac_addr)
{
    union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
    unsigned index;
    struct dp_ast_entry *ase;

    qdf_mem_copy(&local_mac_addr_aligned.raw[0],
                 ast_mac_addr, QDF_MAC_ADDR_SIZE);
    mac_addr = &local_mac_addr_aligned;

    index = dp_peer_ast_hash_index(soc, mac_addr);
    TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
        if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
            return ase;
        }
    }

    return NULL;
}
/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
                                   struct dp_peer *peer, uint8_t *mac_addr,
                                   uint16_t hw_peer_id, uint8_t vdev_id,
                                   uint16_t ast_hash)
{
    struct dp_ast_entry *ast_entry = NULL;
    enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;

    if (!peer)
        return;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "%s: peer %pK ID %d vid %d mac %pM",
              __func__, peer, hw_peer_id, vdev_id, mac_addr);

    qdf_spin_lock_bh(&soc->ast_lock);

    ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
    if (ast_entry) {
        ast_entry->ast_idx = hw_peer_id;
        soc->ast_table[hw_peer_id] = ast_entry;
        ast_entry->is_active = TRUE;
        peer_type = ast_entry->type;
        ast_entry->ast_hash_value = ast_hash;
        ast_entry->is_mapped = TRUE;
    }

    if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
        if (soc->cdp_soc.ol_ops->peer_map_event) {
            soc->cdp_soc.ol_ops->peer_map_event(
                soc->ctrl_psoc, peer->peer_ids[0],
                hw_peer_id, vdev_id,
                mac_addr, peer_type, ast_hash);
        }
    } else {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "AST entry not found");
    }

    qdf_spin_unlock_bh(&soc->ast_lock);
}
void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
                           struct cdp_soc *dp_soc,
                           void *cookie,
                           enum cdp_ast_free_status status)
{
    struct dp_ast_free_cb_params *param =
        (struct dp_ast_free_cb_params *)cookie;
    struct dp_soc *soc = (struct dp_soc *)dp_soc;
    struct dp_peer *peer = NULL;

    if (status != CDP_TXRX_AST_DELETED) {
        qdf_mem_free(cookie);
        return;
    }

    peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
                                  0, param->vdev_id);
    if (peer) {
        dp_peer_add_ast(soc, peer,
                        &param->mac_addr.raw[0],
                        param->type,
                        param->flags);
        dp_peer_unref_delete(peer);
    }
    qdf_mem_free(cookie);
}
/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: AST configuration flags
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry into the peer AST list.
 *
 * Return: 0 if new entry is allocated,
 *         -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
                    struct dp_peer *peer,
                    uint8_t *mac_addr,
                    enum cdp_txrx_ast_entry_type type,
                    uint32_t flags)
{
    struct dp_ast_entry *ast_entry = NULL;
    struct dp_vdev *vdev = NULL, *tmp_vdev = NULL;
    struct dp_pdev *pdev = NULL;
    uint8_t next_node_mac[6];
    int ret = -1;
    txrx_ast_free_cb cb = NULL;
    void *cookie = NULL;
    struct dp_peer *tmp_peer = NULL;
    bool is_peer_found = false;

    vdev = peer->vdev;
    if (!vdev) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Peer's vdev is NULL"));
        QDF_ASSERT(0);
        return ret;
    }

    pdev = vdev->pdev;

    tmp_peer = dp_peer_find_hash_find(soc, mac_addr, 0,
                                      DP_VDEV_ALL);
    if (tmp_peer) {
        tmp_vdev = tmp_peer->vdev;
        if (!tmp_vdev) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      FL("Peer's vdev is NULL"));
            QDF_ASSERT(0);
            dp_peer_unref_delete(tmp_peer);
            return ret;
        }
        if (tmp_vdev->pdev->pdev_id == pdev->pdev_id)
            is_peer_found = true;

        dp_peer_unref_delete(tmp_peer);
    }
    qdf_spin_lock_bh(&soc->ast_lock);
    if (peer->delete_in_progress) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return ret;
    }

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
              "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
              __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
              peer->mac_addr.raw, peer, mac_addr);

    /* fw supports only 2 times the max_peers ast entries */
    if (soc->num_ast_entries >=
        wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Max ast entries reached"));
        return ret;
    }

    /* If the AST entry already exists, just return from here.
     * AST entries with the same mac address can exist on different
     * radios; if ast_override support is enabled, search by pdev in
     * this case.
     */
    if (soc->ast_override_support) {
        ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
                                                    pdev->pdev_id);
        if (ast_entry) {
            if ((type == CDP_TXRX_AST_TYPE_MEC) &&
                (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
                ast_entry->is_active = TRUE;

            qdf_spin_unlock_bh(&soc->ast_lock);
            return 0;
        }
        if (is_peer_found) {
            /* During WDS to static roaming, the peer is added
             * to the list before the static AST entry is
             * created. So, allow an AST entry for the STATIC
             * type even if the peer is present.
             */
            if (type != CDP_TXRX_AST_TYPE_STATIC) {
                qdf_spin_unlock_bh(&soc->ast_lock);
                return 0;
            }
        }
    } else {
        /* WDS_HM_SEC entries can be added for the same mac address;
         * do not check for an existing entry.
         */
        if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
            goto add_ast_entry;

        ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

        if (ast_entry) {
            if ((type == CDP_TXRX_AST_TYPE_MEC) &&
                (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
                ast_entry->is_active = TRUE;

            if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
                !ast_entry->delete_in_progress) {
                qdf_spin_unlock_bh(&soc->ast_lock);
                return 0;
            }

            /* An add for a HMWDS entry cannot be ignored if there
             * is an AST entry with the same mac address.
             *
             * If an ast entry exists with the requested mac
             * address, send a delete command and register a
             * callback which can take care of adding the HMWDS
             * ast entry on delete confirmation from target.
             */
            if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
                struct dp_ast_free_cb_params *param = NULL;

                if (ast_entry->type ==
                    CDP_TXRX_AST_TYPE_WDS_HM_SEC)
                    goto add_ast_entry;

                /* save existing callback */
                if (ast_entry->callback) {
                    cb = ast_entry->callback;
                    cookie = ast_entry->cookie;
                }

                param = qdf_mem_malloc(sizeof(*param));
                if (!param) {
                    QDF_TRACE(QDF_MODULE_ID_TXRX,
                              QDF_TRACE_LEVEL_ERROR,
                              "Allocation failed");
                    qdf_spin_unlock_bh(&soc->ast_lock);
                    return ret;
                }

                qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
                             QDF_MAC_ADDR_SIZE);
                qdf_mem_copy(&param->peer_mac_addr.raw[0],
                             &peer->mac_addr.raw[0],
                             QDF_MAC_ADDR_SIZE);
                param->type = type;
                param->flags = flags;
                param->vdev_id = vdev->vdev_id;
                ast_entry->callback = dp_peer_free_hmwds_cb;
                ast_entry->pdev_id = vdev->pdev->pdev_id;
                ast_entry->type = type;
                ast_entry->cookie = (void *)param;
                if (!ast_entry->delete_in_progress)
                    dp_peer_del_ast(soc, ast_entry);
            }

            /* Modify an already existing AST entry from type
             * WDS to MEC on promotion. This serves as a fix when
             * the backbone of interfaces is interchanged, wherein
             * a wds entry becomes its own MEC. The entry should
             * be replaced only when the ast_entry peer matches
             * the peer received in the mec event. This additional
             * check is needed in wds repeater cases where a
             * multicast packet from a station to the root via the
             * repeater should not remove the wds entry.
             */
            if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
                (type == CDP_TXRX_AST_TYPE_MEC) &&
                (ast_entry->peer == peer)) {
                ast_entry->is_active = FALSE;
                dp_peer_del_ast(soc, ast_entry);
            }
            qdf_spin_unlock_bh(&soc->ast_lock);

            /* Call the saved callback */
            if (cb) {
                cb(soc->ctrl_psoc,
                   dp_soc_to_cdp_soc(soc),
                   cookie,
                   CDP_TXRX_AST_DELETE_IN_PROGRESS);
            }
            return 0;
        }
    }
add_ast_entry:
    ast_entry = (struct dp_ast_entry *)
        qdf_mem_malloc(sizeof(struct dp_ast_entry));

    if (!ast_entry) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("fail to allocate ast_entry"));
        QDF_ASSERT(0);
        return ret;
    }

    qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
    ast_entry->pdev_id = vdev->pdev->pdev_id;
    ast_entry->is_mapped = false;
    ast_entry->delete_in_progress = false;

    switch (type) {
    case CDP_TXRX_AST_TYPE_STATIC:
        peer->self_ast_entry = ast_entry;
        ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
        if (peer->vdev->opmode == wlan_op_mode_sta)
            ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
        break;
    case CDP_TXRX_AST_TYPE_SELF:
        peer->self_ast_entry = ast_entry;
        ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
        break;
    case CDP_TXRX_AST_TYPE_WDS:
        ast_entry->next_hop = 1;
        ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
        break;
    case CDP_TXRX_AST_TYPE_WDS_HM:
        ast_entry->next_hop = 1;
        ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
        break;
    case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
        ast_entry->next_hop = 1;
        ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
        break;
    case CDP_TXRX_AST_TYPE_MEC:
        ast_entry->next_hop = 1;
        ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
        break;
    case CDP_TXRX_AST_TYPE_DA:
        peer = peer->vdev->vap_bss_peer;
        ast_entry->next_hop = 1;
        ast_entry->type = CDP_TXRX_AST_TYPE_DA;
        break;
    default:
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("Incorrect AST entry type"));
    }

    ast_entry->is_active = TRUE;
    DP_STATS_INC(soc, ast.added, 1);
    soc->num_ast_entries++;
    dp_peer_ast_hash_add(soc, ast_entry);
    ast_entry->peer = peer;

    if (type == CDP_TXRX_AST_TYPE_MEC)
        qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
    else
        qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

    TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

    if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
        (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
        (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
        (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
        if (QDF_STATUS_SUCCESS ==
            soc->cdp_soc.ol_ops->peer_add_wds_entry(
                soc->ctrl_psoc,
                peer->vdev->vdev_id,
                peer->mac_addr.raw,
                mac_addr,
                next_node_mac,
                flags,
                ast_entry->type)) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            return 0;
        }
    }

    qdf_spin_unlock_bh(&soc->ast_lock);
    return ret;
}
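
/*
 * Usage sketch (illustrative): the WDS source-port learning path adds an
 * entry for a newly learnt transmitter address roughly as
 *
 *     ret = dp_peer_add_ast(soc, ta_peer, wds_src_mac,
 *                           CDP_TXRX_AST_TYPE_WDS, flags);
 *
 * where ta_peer, wds_src_mac and flags are caller-supplied; "flags" is
 * forwarded opaquely to the target via peer_add_wds_entry().
 */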
/*
 * dp_peer_free_ast_entry() - Free up the ast entry memory
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to free up the memory associated with
 * an AST entry.
 *
 * Return: None
 */
void dp_peer_free_ast_entry(struct dp_soc *soc,
                            struct dp_ast_entry *ast_entry)
{
    /*
     * NOTE: Ensure that call to this API is done
     * after soc->ast_lock is taken
     */
    ast_entry->callback = NULL;
    ast_entry->cookie = NULL;

    DP_STATS_INC(soc, ast.deleted, 1);
    dp_peer_ast_hash_remove(soc, ast_entry);
    dp_peer_ast_cleanup(soc, ast_entry);
    qdf_mem_free(ast_entry);
    soc->num_ast_entries--;
}
/*
 * dp_peer_unlink_ast_entry() - Unlink the ast entry from its lists
 * @soc: SoC handle
 * @ast_entry: Address search entry
 *
 * This API is used to remove/unlink the AST entry from the peer list
 * and the SoC ast_table map.
 *
 * Return: None
 */
void dp_peer_unlink_ast_entry(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry)
{
    /*
     * NOTE: Ensure that call to this API is done
     * after soc->ast_lock is taken
     */
    struct dp_peer *peer = ast_entry->peer;

    TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

    if (ast_entry == peer->self_ast_entry)
        peer->self_ast_entry = NULL;

    /*
     * release the reference only if it is mapped
     * to ast_table
     */
    if (ast_entry->is_mapped)
        soc->ast_table[ast_entry->ast_idx] = NULL;

    ast_entry->peer = NULL;
}
/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
    struct dp_peer *peer;

    if (!ast_entry)
        return;

    if (ast_entry->delete_in_progress)
        return;

    ast_entry->delete_in_progress = true;

    peer = ast_entry->peer;
    dp_peer_ast_send_wds_del(soc, ast_entry);

    /* Remove SELF and STATIC entries in teardown itself */
    if (!ast_entry->next_hop)
        dp_peer_unlink_ast_entry(soc, ast_entry);

    if (ast_entry->is_mapped)
        soc->ast_table[ast_entry->ast_idx] = NULL;

    /* If peer map v2 is enabled we are not freeing the AST entry
     * here; it is supposed to be freed in the unmap event (after
     * we receive delete confirmation from target).
     *
     * If peer_id is invalid we did not get the peer map event
     * for the peer; free the AST entry from here only in that case.
     */
    if (dp_peer_ast_free_in_unmap_supported(peer, ast_entry))
        return;

    /* For a WDS secondary entry, ast_entry->next_hop would be set,
     * so unlinking has to be done explicitly here.
     * As this entry is not a mapped entry, an unmap notification
     * from FW will not come. Hence unlinking is done right here.
     */
    if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
        dp_peer_unlink_ast_entry(soc, ast_entry);

    dp_peer_free_ast_entry(soc, ast_entry);
}
/*
 * dp_peer_update_ast() - Update AST entry for a roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
                       struct dp_ast_entry *ast_entry, uint32_t flags)
{
    int ret = -1;
    struct dp_peer *old_peer;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
              "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
              __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
              peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
              peer->mac_addr.raw);

    /* Do not send an AST update in the below cases:
     * 1) AST entry delete has already been triggered
     * 2) Peer delete has already been triggered
     * 3) We did not get the HTT map for the create event
     */
    if (ast_entry->delete_in_progress || peer->delete_in_progress ||
        !ast_entry->is_mapped)
        return ret;

    if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
        (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
        (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
        (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
        return 0;

    /*
     * Avoids flood of WMI update messages sent to FW for same peer.
     */
    if (qdf_unlikely(ast_entry->peer == peer) &&
        (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
        (ast_entry->peer->vdev == peer->vdev) &&
        (ast_entry->is_active))
        return 0;

    old_peer = ast_entry->peer;
    TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

    ast_entry->peer = peer;
    ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
    ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
    ast_entry->is_active = TRUE;
    TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

    ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
        soc->ctrl_psoc,
        peer->vdev->vdev_id,
        ast_entry->mac_addr.raw,
        peer->mac_addr.raw,
        flags);

    return ret;
}
/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
                                struct dp_ast_entry *ast_entry)
{
    return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
                                 struct dp_ast_entry *ast_entry)
{
    return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
                          struct dp_ast_entry *ast_entry,
                          enum cdp_txrx_ast_entry_type type)
{
    ast_entry->type = type;
}
#else
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
                    uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
                    uint32_t flags)
{
    return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
                       struct dp_ast_entry *ast_entry, uint32_t flags)
{
    return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
                                               uint8_t *ast_mac_addr)
{
    return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
                                                     uint8_t *ast_mac_addr,
                                                     uint8_t pdev_id)
{
    return NULL;
}

static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
    return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
                                   struct dp_peer *peer, uint8_t *mac_addr,
                                   uint16_t hw_peer_id, uint8_t vdev_id,
                                   uint16_t ast_hash)
{
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
                          struct dp_ast_entry *ast_entry,
                          enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
                                struct dp_ast_entry *ast_entry)
{
    return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
                                 struct dp_ast_entry *ast_entry)
{
    return 0xff;
}
#endif
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
                              struct dp_ast_entry *ast_entry)
{
    struct dp_peer *peer = ast_entry->peer;
    struct cdp_soc_t *cdp_soc = &soc->cdp_soc;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
              "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
              __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
              peer->vdev->vdev_id, ast_entry->mac_addr.raw,
              ast_entry->next_hop, ast_entry->peer->mac_addr.raw);

    if (ast_entry->next_hop) {
        cdp_soc->ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
                                            peer->vdev->vdev_id,
                                            ast_entry->mac_addr.raw,
                                            ast_entry->type);
    }
}
/**
 * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
 * @soc: soc handle
 * @peer: peer handle
 * @mac_addr: mac address of the AST entry to search and delete
 *
 * Find the ast entry from the peer list using the mac address and free
 * the entry.
 *
 * Return: SUCCESS or NOENT
 */
static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
                                         struct dp_peer *peer,
                                         uint8_t *mac_addr)
{
    struct dp_ast_entry *ast_entry;
    void *cookie = NULL;
    txrx_ast_free_cb cb = NULL;

    /*
     * release the reference only if it is mapped
     * to ast_table
     */

    qdf_spin_lock_bh(&soc->ast_lock);

    ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);
    if (!ast_entry) {
        qdf_spin_unlock_bh(&soc->ast_lock);
        return QDF_STATUS_E_NOENT;
    } else if (ast_entry->is_mapped) {
        soc->ast_table[ast_entry->ast_idx] = NULL;
    }

    cb = ast_entry->callback;
    cookie = ast_entry->cookie;

    dp_peer_unlink_ast_entry(soc, ast_entry);
    dp_peer_free_ast_entry(soc, ast_entry);

    qdf_spin_unlock_bh(&soc->ast_lock);

    if (cb) {
        cb(soc->ctrl_psoc,
           dp_soc_to_cdp_soc(soc),
           cookie,
           CDP_TXRX_AST_DELETED);
    }

    return QDF_STATUS_SUCCESS;
}
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
                                       uint8_t *peer_mac_addr,
                                       int mac_addr_is_aligned,
                                       uint8_t vdev_id)
{
    union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
    unsigned index;
    struct dp_peer *peer;

    if (mac_addr_is_aligned) {
        mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
    } else {
        qdf_mem_copy(
            &local_mac_addr_aligned.raw[0],
            peer_mac_addr, QDF_MAC_ADDR_SIZE);
        mac_addr = &local_mac_addr_aligned;
    }
    index = dp_peer_find_hash_index(soc, mac_addr);
    qdf_spin_lock_bh(&soc->peer_ref_mutex);
    TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
        if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
            ((peer->vdev->vdev_id == vdev_id) ||
             (vdev_id == DP_VDEV_ALL))) {
            /* found it - increment the ref count before releasing
             * the lock
             */
            qdf_atomic_inc(&peer->ref_cnt);
            qdf_spin_unlock_bh(&soc->peer_ref_mutex);
            return peer;
        }
    }
    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
    return NULL; /* failure */
}
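
/*
 * Example: a successful lookup returns with the peer's reference count
 * already incremented, so callers in this file pair it with
 * dp_peer_unref_delete():
 *
 *     peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id);
 *     if (peer) {
 *         ... use peer ...
 *         dp_peer_unref_delete(peer);
 *     }
 */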
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
    unsigned index;
    struct dp_peer *tmppeer = NULL;
    int found = 0;

    index = dp_peer_find_hash_index(soc, &peer->mac_addr);
    /* Check if tail is not empty before delete */
    QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
    /*
     * DO NOT take the peer_ref_mutex lock here - it needs to be taken
     * by the caller.
     * The caller needs to hold the lock from the time the peer object's
     * reference count is decremented and tested up through the time the
     * reference to the peer object is removed from the hash table, by
     * this function.
     * Holding the lock only while removing the peer object reference
     * from the hash table keeps the hash table consistent, but does not
     * protect against a new HL tx context starting to use the peer object
     * if it looks up the peer object from its MAC address just after the
     * peer ref count is decremented to zero, but just before the peer
     * object reference is removed from the hash table.
     */
    TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
        if (tmppeer == peer) {
            found = 1;
            break;
        }
    }
    QDF_ASSERT(found);
    TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
    int i;

    /*
     * Not really necessary to take peer_ref_mutex lock - by this point,
     * it's known that the soc is no longer in use.
     */
    for (i = 0; i <= soc->peer_hash.mask; i++) {
        if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
            struct dp_peer *peer, *peer_next;

            /*
             * TAILQ_FOREACH_SAFE must be used here to avoid any
             * memory access violation after peer is freed
             */
            TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
                               hash_list_elem, peer_next) {
                /*
                 * Don't remove the peer from the hash table -
                 * that would modify the list we are currently
                 * traversing, and it's not necessary anyway.
                 */
                /*
                 * Artificially adjust the peer's ref count to
                 * 1, so it will get deleted by
                 * dp_peer_unref_delete.
                 */
                /* set to zero */
                qdf_atomic_init(&peer->ref_cnt);
                /* incr to one */
                qdf_atomic_inc(&peer->ref_cnt);
                dp_peer_unref_delete(peer);
            }
        }
    }
}
static void dp_peer_ast_table_detach(struct dp_soc *soc)
{
    if (soc->ast_table) {
        qdf_mem_free(soc->ast_table);
        soc->ast_table = NULL;
    }
}

static void dp_peer_find_map_detach(struct dp_soc *soc)
{
    if (soc->peer_id_to_obj_map) {
        qdf_mem_free(soc->peer_id_to_obj_map);
        soc->peer_id_to_obj_map = NULL;
    }
}
int dp_peer_find_attach(struct dp_soc *soc)
{
    if (dp_peer_find_map_attach(soc))
        return 1;

    if (dp_peer_find_hash_attach(soc)) {
        dp_peer_find_map_detach(soc);
        return 1;
    }

    if (dp_peer_ast_table_attach(soc)) {
        dp_peer_find_hash_detach(soc);
        dp_peer_find_map_detach(soc);
        return 1;
    }

    if (dp_peer_ast_hash_attach(soc)) {
        dp_peer_ast_table_detach(soc);
        dp_peer_find_hash_detach(soc);
        dp_peer_find_map_detach(soc);
        return 1;
    }

    return 0; /* success */
}
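
/*
 * Note the unwind ordering above: each attach failure tears down only the
 * stages that already succeeded, in reverse order of attachment
 * (map -> hash -> ast table -> ast hash), so a failed dp_peer_find_attach()
 * leaves no partially initialized state behind.
 */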
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d for TID %d\n",
			       queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
		       "ssn: %d\n"
		       "curr_idx : %d\n"
		       "pn_31_0 : %08x\n"
		       "pn_63_32 : %08x\n"
		       "pn_95_64 : %08x\n"
		       "pn_127_96 : %08x\n"
		       "last_rx_enq_tstamp : %08x\n"
		       "last_rx_deq_tstamp : %08x\n"
		       "rx_bitmap_31_0 : %08x\n"
		       "rx_bitmap_63_32 : %08x\n"
		       "rx_bitmap_95_64 : %08x\n"
		       "rx_bitmap_127_96 : %08x\n"
		       "rx_bitmap_159_128 : %08x\n"
		       "rx_bitmap_191_160 : %08x\n"
		       "rx_bitmap_223_192 : %08x\n"
		       "rx_bitmap_255_224 : %08x\n",
		       rx_tid->tid,
		       queue_status->ssn, queue_status->curr_idx,
		       queue_status->pn_31_0, queue_status->pn_63_32,
		       queue_status->pn_95_64, queue_status->pn_127_96,
		       queue_status->last_rx_enq_tstamp,
		       queue_status->last_rx_deq_tstamp,
		       queue_status->rx_bitmap_31_0,
		       queue_status->rx_bitmap_63_32,
		       queue_status->rx_bitmap_95_64,
		       queue_status->rx_bitmap_127_96,
		       queue_status->rx_bitmap_159_128,
		       queue_status->rx_bitmap_191_160,
		       queue_status->rx_bitmap_223_192,
		       queue_status->rx_bitmap_255_224);

	DP_PRINT_STATS(
		       "curr_mpdu_cnt : %d\n"
		       "curr_msdu_cnt : %d\n"
		       "fwd_timeout_cnt : %d\n"
		       "fwd_bar_cnt : %d\n"
		       "dup_cnt : %d\n"
		       "frms_in_order_cnt : %d\n"
		       "bar_rcvd_cnt : %d\n"
		       "mpdu_frms_cnt : %d\n"
		       "msdu_frms_cnt : %d\n"
		       "total_byte_cnt : %d\n"
		       "late_recv_mpdu_cnt : %d\n"
		       "win_jump_2k : %d\n"
		       "hole_cnt : %d\n",
		       queue_status->curr_mpdu_cnt,
		       queue_status->curr_msdu_cnt,
		       queue_status->fwd_timeout_cnt,
		       queue_status->fwd_bar_cnt,
		       queue_status->dup_cnt,
		       queue_status->frms_in_order_cnt,
		       queue_status->bar_rcvd_cnt,
		       queue_status->mpdu_frms_cnt,
		       queue_status->msdu_frms_cnt,
		       queue_status->total_cnt,
		       queue_status->late_recv_mpdu_cnt,
		       queue_status->win_jump_2k,
		       queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req : %d\n"
		       "Addba Resp : %d\n"
		       "Addba Resp success : %d\n"
		       "Addba Resp failed : %d\n"
		       "Delba Req received : %d\n"
		       "Delba Tx success : %d\n"
		       "Delba Tx Fail : %d\n"
		       "BA window size : %d\n"
		       "Pn size : %d\n",
		       rx_tid->num_of_addba_req,
		       rx_tid->num_of_addba_resp,
		       rx_tid->num_addba_rsp_success,
		       rx_tid->num_addba_rsp_failed,
		       rx_tid->num_of_delba_req,
		       rx_tid->delba_tx_success_cnt,
		       rx_tid->delba_tx_fail_cnt,
		       rx_tid->ba_win_size,
		       rx_tid->pn_size);
}
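/*
 * dp_rx_tid_stats_cb() consumes the queue_status returned by a REO
 * queue-stats command (presumably registered as the callback when
 * CMD_GET_QUEUE_STATS is issued elsewhere in this file) and dumps both
 * the HW-reported queue state and the host-side BA bookkeeping for the
 * TID.
 */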
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id <= soc->max_peers);
	/* check if there's already a peer object with this MAC address */
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
				      0 /* is aligned */, vdev_id);
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %pM",
		  __func__, peer, peer_id, vdev_id, peer_mac_addr);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "%s: ref_cnt: %d", __func__,
			  qdf_atomic_read(&peer->ref_cnt));
		if (!soc->peer_id_to_obj_map[peer_id])
			soc->peer_id_to_obj_map[peer_id] = peer;
		else {
			/* Peer map event came for peer_id which
			 * is already mapped, this is not expected
			 */
			QDF_ASSERT(0);
		}

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return peer;
	}

	return NULL;
}
/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc - generic soc handle
 * @peer_id - peer_id from firmware
 * @hw_peer_id - ast index for this peer
 * @vdev_id - vdev ID
 * @peer_mac_addr - mac address of the peer
 * @ast_hash - ast hash value
 * @is_wds - flag to indicate peer map event for WDS ast entry
 *
 * Associate the peer_id that firmware provided with the peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */
void
dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
		       uint16_t hw_peer_id, uint8_t vdev_id,
		       uint8_t *peer_mac_addr, uint16_t ast_hash,
		       uint8_t is_wds)
{
	struct dp_peer *peer = NULL;
	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;

	dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac %pM, vdev_id %d",
		soc, peer_id, hw_peer_id,
		peer_mac_addr, vdev_id);

	/* For a peer map event for a WDS AST entry, get the peer from
	 * the obj map
	 */
	if (is_wds) {
		peer = soc->peer_id_to_obj_map[peer_id];
		/*
		 * In certain cases, like an Auth attack on a repeater,
		 * the number of ast_entries falling in the same hash
		 * bucket can exceed the max_skid length supported by
		 * HW in the root AP. In these cases the FW will return
		 * the hw_peer_id (ast_index) as 0xffff, indicating HW
		 * could not add the entry in its table. The host has to
		 * delete the entry from its table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (!dp_peer_ast_free_entry_by_mac(soc,
							   peer,
							   peer_mac_addr))
				return;

			dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
				 peer, peer->peer_ids[0],
				 peer->mac_addr.raw, peer_mac_addr, vdev_id,
				 is_wds);

			return;
		}
	} else {
		/*
		 * It's the responsibility of the CP and FW to ensure
		 * that peer is created successfully. Ideally DP should
		 * not hit the below condition for directly associated
		 * peers.
		 */
		if ((hw_peer_id < 0) ||
		    (hw_peer_id >=
		     wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "invalid hw_peer_id: %d", hw_peer_id);
			qdf_assert_always(0);
		}

		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
					   hw_peer_id, vdev_id);

		if (peer) {
			if (wlan_op_mode_sta == peer->vdev->opmode &&
			    qdf_mem_cmp(peer->mac_addr.raw,
					peer->vdev->mac_addr.raw,
					QDF_MAC_ADDR_SIZE) != 0) {
				dp_info("STA vdev bss_peer!!!!");
				peer->bss_peer = 1;
				peer->vdev->vap_bss_peer = peer;
				qdf_mem_copy(peer->vdev->vap_bss_peer_mac_addr,
					     peer->mac_addr.raw,
					     QDF_MAC_ADDR_SIZE);
			}

			if (peer->vdev->opmode == wlan_op_mode_sta) {
				peer->vdev->bss_ast_hash = ast_hash;
				peer->vdev->bss_ast_idx = hw_peer_id;
			}

			/* Add an AST entry in case the self AST entry was
			 * deleted due to a DP/CP sync issue.
			 *
			 * self_ast_entry is modified in the peer create
			 * and peer unmap paths, which cannot run in
			 * parallel with peer map, so no lock is needed
			 * before referring to it.
			 */
			if (!peer->self_ast_entry) {
				dp_info("Add self ast from map %pM",
					peer_mac_addr);
				dp_peer_add_ast(soc, peer,
						peer_mac_addr,
						type, 0);
			}
		}
	}

	dp_peer_map_ast(soc, peer, peer_mac_addr,
			hw_peer_id, vdev_id, ast_hash);
}
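/*
 * Regardless of whether the map event was for a WDS entry or a directly
 * associated peer, dp_peer_map_ast() above binds the firmware-provided
 * hw_peer_id and ast_hash to the host AST entry for this MAC address.
 */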
/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc - generic soc handle
 * @peer_id - peer_id from firmware
 * @vdev_id - vdev ID
 * @mac_addr - mac address of the peer or wds entry
 * @is_wds - flag to indicate peer unmap event for WDS ast entry
 *
 * Return: none
 */
void
dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
			 uint8_t vdev_id, uint8_t *mac_addr,
			 uint8_t is_wds)
{
	struct dp_peer *peer;
	uint8_t i;

	peer = __dp_peer_find_by_id(soc, peer_id);

	/*
	 * Currently peer IDs are assigned for vdevs as well as peers.
	 * If the peer ID is for a vdev, then the peer pointer stored
	 * in peer_id_to_obj_map will be NULL.
	 */
	if (!peer) {
		dp_err("Received unmap event for invalid peer_id %u", peer_id);
		return;
	}

	/* If V2 peer map messages are enabled, the AST entry has to be
	 * freed here
	 */
	if (is_wds) {
		if (!dp_peer_ast_free_entry_by_mac(soc, peer, mac_addr))
			return;

		dp_alert("AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u",
			 peer, peer->peer_ids[0],
			 peer->mac_addr.raw, mac_addr, vdev_id,
			 is_wds);

		return;
	}

	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
		soc, peer_id, peer);

	soc->peer_id_to_obj_map[peer_id] = NULL;
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == peer_id) {
			peer->peer_ids[i] = HTT_INVALID_PEER;
			break;
		}
	}

	/*
	 * Reset ast flow mapping table
	 */
	dp_peer_reset_flowq_map(peer);

	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
						      peer_id, vdev_id);
	}

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	dp_peer_unref_delete(peer);
}
void
dp_peer_find_detach(struct dp_soc *soc)
{
	dp_peer_find_map_detach(soc);
	dp_peer_find_hash_detach(soc);
	dp_peer_ast_hash_detach(soc);
	dp_peer_ast_table_detach(soc);
}

static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
				union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if ((reo_status->rx_queue_status.header.status !=
	     HAL_REO_CMD_SUCCESS) &&
	    (reo_status->rx_queue_status.header.status !=
	     HAL_REO_CMD_DRAIN)) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc update failed(%d): tid %d",
			  __func__,
			  reo_status->rx_queue_status.header.status,
			  rx_tid->tid);
	}
}
/*
 * dp_find_peer_by_addr - find peer instance by mac address
 * @dev: physical device instance
 * @peer_mac_addr: peer mac address
 *
 * Return: peer instance pointer
 */
void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)dev;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	dp_verbose_debug("peer %pK mac: %pM", peer,
			 peer->mac_addr.raw);

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return peer;
}
static bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
{
	struct ol_if_ops *ol_ops = NULL;
	bool is_roaming = false;
	uint8_t vdev_id = -1;
	struct cdp_soc_t *soc;

	if (!peer) {
		dp_info("Peer is NULL. No roaming possible");
		return false;
	}

	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;

	if (ol_ops && ol_ops->is_roam_inprogress) {
		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
	}

	dp_info("peer: %pM, vdev_id: %d, is_roaming: %d",
		peer->mac_addr.raw, vdev_id, is_roaming);

	return is_roaming;
}
QDF_STATUS dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
				  ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	} else {
		dp_set_ssn_valid_flag(&params, 0);
	}

	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
			    dp_rx_tid_update_cb, rx_tid)) {
		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
	}

	rx_tid->ba_win_size = ba_window_size;

	if (dp_get_peer_vdev_roaming_in_progress(peer))
		return QDF_STATUS_E_PERM;

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup)
		soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
			soc->ctrl_psoc, peer->vdev->pdev->pdev_id,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);

	return QDF_STATUS_SUCCESS;
}
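/*
 * Every REO command built in this file addresses the target queue
 * descriptor through std.addr_lo/std.addr_hi, i.e. the 64-bit physical
 * address of the hardware queue descriptor split into two 32-bit halves.
 */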
/*
 * dp_reo_desc_free() - Callback to free reo descriptor memory after
 * HW cache flush
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
			     union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;

	if ((reo_status->fl_cache_status.header.status !=
	     HAL_REO_CMD_SUCCESS) &&
	    (reo_status->fl_cache_status.header.status !=
	     HAL_REO_CMD_DRAIN)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc flush failed(%d): tid %d",
			  __func__,
			  reo_status->rx_queue_status.header.status,
			  freedesc->rx_tid.tid);
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
		  (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
	qdf_mem_unmap_nbytes_single(soc->osdev,
				    rx_tid->hw_qdesc_paddr,
				    QDF_DMA_BIDIRECTIONAL,
				    rx_tid->hw_qdesc_alloc_size);
	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
	qdf_mem_free(freedesc);
}
#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
/* Hawkeye emulation requires bus address to be >= 0x50000000 */
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	if (dma_addr < 0x50000000)
		return QDF_STATUS_E_FAILURE;
	else
		return QDF_STATUS_SUCCESS;
}
#else
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif
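/*
 * On non-emulation builds the address check is a no-op, so the retry
 * loop around it in dp_rx_tid_setup_wifi3() below runs at most once;
 * only the x86 emulation build can trigger reallocation attempts.
 */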
/*
 * dp_rx_tid_setup_wifi3() - Setup receive TID state
 * @peer: Datapath peer handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
				 uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (peer->delete_in_progress ||
	    !qdf_atomic_read(&peer->is_default_route_set))
		return QDF_STATUS_E_FAILURE;

	rx_tid->ba_win_size = ba_window_size;
	if (rx_tid->hw_qdesc_vaddr_unaligned)
		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
					      start_seq);
	rx_tid->delba_tx_status = 0;
	rx_tid->ppdu_id_2k = 0;
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
	rx_tid->delba_tx_success_cnt = 0;
	rx_tid->delba_tx_fail_cnt = 0;
	rx_tid->statuscode = 0;

	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	if (tid != DP_NON_QOS_TID)
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
						       HAL_RX_MAX_BA_WINDOW,
						       tid);
	else
		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
						       ba_window_size, tid);

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc alloc failed: tid %d",
			  __func__, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
	    hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
				       hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Rx tid HW desc alloc failed: tid %d",
				  __func__, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Total Size %d Aligned Addr %pK",
			  __func__, rx_tid->hw_qdesc_alloc_size,
			  hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}
	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);

	qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
		QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
		&(rx_tid->hw_qdesc_paddr));

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
	    QDF_STATUS_SUCCESS) {
		if (alloc_tries++ < 10) {
			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
			goto try_desc_alloc;
		} else {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
				  __func__, tid);
			err = QDF_STATUS_E_NOMEM;
			goto error;
		}
	}

	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
		err = QDF_STATUS_E_PERM;
		goto error;
	}

	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
		    soc->ctrl_psoc,
		    peer->vdev->pdev->pdev_id,
		    peer->vdev->vdev_id,
		    peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
		    1, ba_window_size)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: Failed to send reo queue setup to FW - tid %d\n",
				  __func__, tid);
			err = QDF_STATUS_E_FAILURE;
			goto error;
		}
	}
	return QDF_STATUS_SUCCESS;

error:
	if (rx_tid->hw_qdesc_vaddr_unaligned) {
		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
		    QDF_STATUS_SUCCESS)
			qdf_mem_unmap_nbytes_single(
				soc->osdev,
				rx_tid->hw_qdesc_paddr,
				QDF_DMA_BIDIRECTIONAL,
				rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	}
	return err;
}
#ifdef REO_DESC_DEFER_FREE
/*
 * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add
 * the desc back to the freelist and defer the deletion
 *
 * @soc: DP SOC handle
 * @desc: Base descriptor to be freed
 * @reo_status: REO command status
 */
static void dp_reo_desc_clean_up(struct dp_soc *soc,
				 struct reo_desc_list_node *desc,
				 union hal_reo_status *reo_status)
{
	desc->free_ts = qdf_get_system_timestamp();
	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
	qdf_list_insert_back(&soc->reo_desc_freelist,
			     (qdf_list_node_t *)desc);
}
#else
/*
 * dp_reo_desc_clean_up() - If sending the REO cmd to flush the cache
 * fails, free the base REO desc anyway
 *
 * @soc: DP SOC handle
 * @desc: Base descriptor to be freed
 * @reo_status: REO command status
 */
static void dp_reo_desc_clean_up(struct dp_soc *soc,
				 struct reo_desc_list_node *desc,
				 union hal_reo_status *reo_status)
{
	if (reo_status) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = 0;
		dp_reo_desc_free(soc, (void *)desc, reo_status);
	}
}
#endif
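/*
 * With REO_DESC_DEFER_FREE the descriptor is parked on the freelist and
 * retried later from dp_rx_tid_delete_cb(); without it, a synthetic
 * success status is fabricated so dp_reo_desc_free() releases the memory
 * immediately even though the HW cache flush was never confirmed.
 */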
/*
 * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
 * cmd and re-insert desc into free list if send fails.
 *
 * @soc: DP SOC handle
 * @desc: desc with resend update cmd flag set
 * @rx_tid: Desc RX tid associated with update cmd for resetting
 * valid field to 0 in h/w
 */
static void dp_resend_update_reo_cmd(struct dp_soc *soc,
				     struct reo_desc_list_node *desc,
				     struct dp_rx_tid *rx_tid)
{
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));
	params.std.need_status = 1;
	params.std.addr_lo =
		rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi =
		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;
	desc->resend_update_reo_cmd = false;
	/*
	 * If the cmd send fails then set resend_update_reo_cmd flag
	 * and insert the desc at the end of the free list to retry.
	 */
	if (dp_reo_send_cmd(soc,
			    CMD_UPDATE_RX_REO_QUEUE,
			    &params,
			    dp_rx_tid_delete_cb,
			    (void *)desc)
	    != QDF_STATUS_SUCCESS) {
		desc->resend_update_reo_cmd = true;
		desc->free_ts = qdf_get_system_timestamp();
		qdf_list_insert_back(&soc->reo_desc_freelist,
				     (qdf_list_node_t *)desc);
		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
	}
}
/*
 * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
 * after deleting the entries (ie., setting valid=0)
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
			 union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	uint32_t list_size;
	struct reo_desc_list_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t desc_size, tot_desc_size;
	struct hal_reo_cmd_params params;

	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
		return;
	} else if (reo_status->rx_queue_status.header.status !=
		   HAL_REO_CMD_SUCCESS) {
		/* Should not happen normally. Just print error for now */
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx tid HW desc deletion failed(%d): tid %d",
			  __func__,
			  reo_status->rx_queue_status.header.status,
			  freedesc->rx_tid.tid);
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		  "%s: rx_tid: %d status: %d", __func__,
		  freedesc->rx_tid.tid,
		  reo_status->rx_queue_status.header.status);

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	freedesc->free_ts = curr_ts;
	qdf_list_insert_back_size(&soc->reo_desc_freelist,
				  (qdf_list_node_t *)freedesc, &list_size);

	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
	       ((list_size >= REO_DESC_FREELIST_SIZE) ||
		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
		(desc->resend_update_reo_cmd && list_size))) {
		struct dp_rx_tid *rx_tid;

		qdf_list_remove_front(&soc->reo_desc_freelist,
				      (qdf_list_node_t **)&desc);
		list_size--;
		rx_tid = &desc->rx_tid;

		/* First process descs with resend_update_reo_cmd set */
		if (desc->resend_update_reo_cmd) {
			dp_resend_update_reo_cmd(soc, desc, rx_tid);
			continue;
		}

		/* Flush and invalidate REO descriptor from HW cache: Base and
		 * extension descriptors should be flushed separately
		 */
		tot_desc_size = rx_tid->hw_qdesc_alloc_size;
		/* Get base descriptor size by passing non-qos TID */
		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
						   DP_NON_QOS_TID);

		/* Flush reo extension descriptors */
		while ((tot_desc_size -= desc_size) > 0) {
			qdf_mem_zero(&params, sizeof(params));
			params.std.addr_lo =
				((uint64_t)(rx_tid->hw_qdesc_paddr) +
				tot_desc_size) & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							CMD_FLUSH_CACHE,
							&params,
							NULL,
							NULL)) {
				dp_err_rl("fail to send CMD_CACHE_FLUSH:"
					  "tid %d desc %pK", rx_tid->tid,
					  (void *)(rx_tid->hw_qdesc_paddr));
				DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
			}
		}

		/* Flush base descriptor */
		qdf_mem_zero(&params, sizeof(params));
		params.std.need_status = 1;
		params.std.addr_lo =
			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
							  CMD_FLUSH_CACHE,
							  &params,
							  dp_reo_desc_free,
							  (void *)desc)) {
			union hal_reo_status reo_status;
			/*
			 * If dp_reo_send_cmd returns failure, the related TID
			 * queue desc should be unmapped, and the local
			 * reo_desc, together with the TID queue desc, also
			 * needs to be freed accordingly.
			 *
			 * Here invoke the desc_free function directly to do
			 * the clean up.
			 *
			 * In the MCL path, add the desc back to the free
			 * list and defer the deletion.
			 */
			dp_err_log("%s: fail to send REO cmd to flush cache: tid %d",
				   __func__, rx_tid->tid);
			dp_reo_desc_clean_up(soc, desc, &reo_status);
			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
		}
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
}
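/*
 * Freelist aging policy, as implemented by the while loop above: a desc
 * is reclaimed once the list grows to REO_DESC_FREELIST_SIZE entries,
 * once it has sat on the list longer than REO_DESC_FREE_DEFER_MS, or
 * immediately if it is flagged for a resend of the update command.
 */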
/*
 * dp_rx_tid_delete_wifi3() - Delete receive TID queue
 * @peer: Datapath peer handle
 * @tid: TID
 *
 * Return: 0 on success, error code on failure
 */
static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
{
	struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	struct reo_desc_list_node *freedesc =
		qdf_mem_malloc(sizeof(*freedesc));

	if (!freedesc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: malloc failed for freedesc: tid %d",
			  __func__, tid);
		return -ENOMEM;
	}

	freedesc->rx_tid = *rx_tid;
	freedesc->resend_update_reo_cmd = false;

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;

	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
			    dp_rx_tid_delete_cb, (void *)freedesc)
	    != QDF_STATUS_SUCCESS) {
		/* Defer the clean up to the call back context */
		qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
		freedesc->free_ts = qdf_get_system_timestamp();
		freedesc->resend_update_reo_cmd = true;
		qdf_list_insert_front(&soc->reo_desc_freelist,
				      (qdf_list_node_t *)freedesc);
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
		qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
		dp_info("Failed to send CMD_UPDATE_RX_REO_QUEUE");
	}

	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
	rx_tid->hw_qdesc_alloc_size = 0;
	rx_tid->hw_qdesc_paddr = 0;

	return 0;
}
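/*
 * The rx_tid fields can be cleared above regardless of whether the REO
 * command was sent successfully: freedesc holds its own copy of the
 * rx_tid state, so the deferred callback path still has everything it
 * needs to unmap and free the queue descriptor.
 */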
#ifdef DP_LFR
static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
{
	int tid;

	for (tid = 1; tid < DP_MAX_TIDS - 1; tid++) {
		dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "Setting up TID %d for peer %pK peer->local_id %d",
			  tid, peer, peer->local_id);
	}
}
#else
static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
#endif
#ifndef WLAN_TX_PKT_CAPTURE_ENH
/*
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_init(struct dp_peer *peer)
{
}

/*
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
static inline void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
}

/*
 * dp_peer_update_80211_hdr() - dp peer update 80211 hdr
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
}
#endif
/*
 * dp_peer_tx_init() - Initialize transmit TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_tx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	dp_peer_tid_queue_init(peer);
	dp_peer_update_80211_hdr(peer->vdev, peer);
}

/*
 * dp_peer_tx_cleanup() - Deinitialize transmit TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 *
 */
static inline void
dp_peer_tx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
{
	dp_peer_tid_queue_cleanup(peer);
}
/*
 * dp_peer_rx_init() - Initialize receive TID state
 * @pdev: Datapath pdev
 * @peer: Datapath peer
 *
 */
void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int tid;
	struct dp_rx_tid *rx_tid;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		rx_tid->array = &rx_tid->base;
		rx_tid->base.head = rx_tid->base.tail = NULL;
		rx_tid->tid = tid;
		rx_tid->defrag_timeout_ms = 0;
		rx_tid->ba_win_size = 0;
		rx_tid->ba_status = DP_RX_BA_INACTIVE;

		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
	}

	peer->active_ba_session_cnt = 0;
	peer->hw_buffer_size = 0;
	peer->kill_256_sessions = 0;

	/* Setup default (non-qos) rx tid queue */
	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);

	/* Setup rx tid queue for TID 0.
	 * Other queues will be set up on receiving the first packet,
	 * which would otherwise hit a NULL REO queue error.
	 */
	dp_rx_tid_setup_wifi3(peer, 0, 1, 0);

	/*
	 * Setup the rest of the TIDs to handle LFR
	 */
	dp_peer_setup_remaining_tids(peer);

	/*
	 * Set security defaults: no PN check, no security. The target may
	 * send a HTT SEC_IND message to overwrite these defaults.
	 */
	peer->security[dp_sec_ucast].sec_type =
		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
}
/*
 * dp_peer_rx_cleanup() - Cleanup receive TID state
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 * @reuse: Peer reference reuse
 *
 */
void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
{
	int tid;
	uint32_t tid_delete_mask = 0;

	dp_info("Remove tids for peer: %pK", peer);
	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];

		qdf_spin_lock_bh(&rx_tid->tid_lock);
		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
			/* Cleanup defrag related resource */
			dp_rx_defrag_waitlist_remove(peer, tid);
			dp_rx_reorder_flush_frag(peer, tid);
		}

		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
			dp_rx_tid_delete_wifi3(peer, tid);
			tid_delete_mask |= (1 << tid);
		}
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
	}
#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
			peer->vdev->pdev->pdev_id,
			peer->vdev->vdev_id, peer->mac_addr.raw,
			tid_delete_mask);
	}
#endif
	if (!reuse)
		for (tid = 0; tid < DP_MAX_TIDS; tid++)
			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
}
#ifdef FEATURE_PERPKT_INFO
/*
 * dp_peer_ppdu_delayed_ba_init() - Initialize ppdu in peer
 * @peer: Datapath peer
 *
 * return: void
 */
void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
{
	qdf_mem_zero(&peer->delayed_ba_ppdu_stats,
		     sizeof(struct cdp_delayed_tx_completion_ppdu_user));
	peer->last_delayed_ba = false;
	peer->last_delayed_ba_ppduid = 0;
}
#else
/*
 * dp_peer_ppdu_delayed_ba_init() - Initialize ppdu in peer
 * @peer: Datapath peer
 *
 * return: void
 */
void dp_peer_ppdu_delayed_ba_init(struct dp_peer *peer)
{
}
#endif
/*
 * dp_peer_cleanup() - Cleanup peer information
 * @vdev: Datapath vdev
 * @peer: Datapath peer
 * @reuse: Peer reference reuse
 *
 */
void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer, bool reuse)
{
	dp_peer_tx_cleanup(vdev, peer);

	/* cleanup the Rx reorder queues for this peer */
	dp_peer_rx_cleanup(vdev, peer, reuse);
}
/* dp_teardown_256_ba_sessions() - Teardown sessions using 256
 *                                 window size when a request with
 *                                 64 window size is received.
 *                                 This is done as a WAR since HW can
 *                                 have only one setting per peer (64 or 256).
 *                                 For HKv2, we use per tid buffersize setting
 *                                 for 0 to per_tid_basize_max_tid. For tids
 *                                 above per_tid_basize_max_tid we use the
 *                                 HKv1 method.
 * @peer: Datapath peer
 *
 * Return: void
 */
static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
{
	uint8_t delba_rcode = 0;
	int tid;
	struct dp_rx_tid *rx_tid = NULL;

	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
	for (; tid < DP_MAX_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		qdf_spin_lock_bh(&rx_tid->tid_lock);

		if (rx_tid->ba_win_size <= 64) {
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			continue;
		} else {
			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
				/* send delba */
				if (!rx_tid->delba_tx_status) {
					rx_tid->delba_tx_retry++;
					rx_tid->delba_tx_status = 1;
					rx_tid->delba_rcode =
					IEEE80211_REASON_QOS_SETUP_REQUIRED;
					delba_rcode = rx_tid->delba_rcode;

					qdf_spin_unlock_bh(&rx_tid->tid_lock);
					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
							peer->vdev->pdev->soc->ctrl_psoc,
							peer->vdev->vdev_id,
							peer->mac_addr.raw,
							tid, delba_rcode);
				} else {
					qdf_spin_unlock_bh(&rx_tid->tid_lock);
				}
			} else {
				qdf_spin_unlock_bh(&rx_tid->tid_lock);
			}
		}
	}
}
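/*
 * Note that tid_lock is always dropped before calling into
 * ol_ops->send_delba() above: the rcode is copied to a local first so
 * the control-path callback is never invoked with a DP spinlock held.
 */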
/*
 * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
				      uint8_t *peer_mac,
				      uint16_t vdev_id,
				      uint8_t tid, int status)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);
	struct dp_rx_tid *rx_tid = NULL;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		goto fail;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->num_addba_rsp_failed++;
		dp_rx_tid_update_wifi3(peer, tid, 1,
				       IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		dp_err("RxTid- %d addba rsp tx completion failed", tid);

		goto success;
	}

	rx_tid->num_addba_rsp_success++;
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
			  __func__, tid);
		goto fail;
	}

	if (!qdf_atomic_read(&peer->is_default_route_set)) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: default route is not set for peer: %pM",
			  __func__, peer->mac_addr.raw);
		goto fail;
	}

	/* First Session */
	if (peer->active_ba_session_cnt == 0) {
		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
			peer->hw_buffer_size = 256;
		else
			peer->hw_buffer_size = 64;
	}

	rx_tid->ba_status = DP_RX_BA_ACTIVE;

	peer->active_ba_session_cnt++;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

	/* Kill any session having 256 buffer size
	 * when 64 buffer size request is received.
	 * Also, latch on to 64 as new buffer size.
	 */
	if (peer->kill_256_sessions) {
		dp_teardown_256_ba_sessions(peer);
		peer->kill_256_sessions = 0;
	}

success:
	dp_peer_unref_delete(peer);
	return QDF_STATUS_SUCCESS;

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return QDF_STATUS_E_FAILURE;
}
/*
 * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @dialogtoken: output dialogtoken
 * @statuscode: output statuscode
 * @buffersize: Output BA window size
 * @batimeout: Output BA timeout
 */
QDF_STATUS
dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			     uint16_t vdev_id, uint8_t tid,
			     uint8_t *dialogtoken, uint16_t *statuscode,
			     uint16_t *buffersize, uint16_t *batimeout)
{
	struct dp_rx_tid *rx_tid = NULL;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_resp++;
	/* setup ADDBA response parameters */
	*dialogtoken = rx_tid->dialogtoken;
	*statuscode = rx_tid->statuscode;
	*buffersize = rx_tid->ba_win_size;
	*batimeout = 0;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
/* dp_check_ba_buffersize() - Check buffer size in request
 *                            and latch onto this size based on
 *                            size used in first active session.
 * @peer: Datapath peer
 * @tid: Tid
 * @buffersize: Block ack window size
 *
 * Return: void
 */
static void dp_check_ba_buffersize(struct dp_peer *peer,
				   uint16_t tid,
				   uint16_t buffersize)
{
	struct dp_rx_tid *rx_tid = NULL;

	rx_tid = &peer->rx_tid[tid];
	if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
	    tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
		rx_tid->ba_win_size = buffersize;
		return;
	} else {
		if (peer->active_ba_session_cnt == 0) {
			rx_tid->ba_win_size = buffersize;
		} else {
			if (peer->hw_buffer_size == 64) {
				if (buffersize <= 64)
					rx_tid->ba_win_size = buffersize;
				else
					rx_tid->ba_win_size = peer->hw_buffer_size;
			} else if (peer->hw_buffer_size == 256) {
				if (buffersize > 64) {
					rx_tid->ba_win_size = buffersize;
				} else {
					rx_tid->ba_win_size = buffersize;
					peer->hw_buffer_size = 64;
					peer->kill_256_sessions = 1;
				}
			}
		}
	}
}
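/*
 * Latching policy in summary: TIDs below per_tid_basize_max_tid keep
 * their own window; otherwise the first active session fixes the HW
 * buffer size, a 64-latched peer clamps later requests to 64, and a
 * 256-latched peer that receives a <= 64 request drops to 64 and flags
 * the existing 256 sessions for teardown (kill_256_sessions).
 */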
/*
 * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @dialogtoken: dialogtoken from ADDBA frame
 * @tid: TID number
 * @batimeout: BA timeout
 * @buffersize: BA window size
 * @startseqnum: Start seq. number received in BA sequence control
 *
 * Return: 0 on success, error code on failure
 */
int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
				  uint8_t *peer_mac,
				  uint16_t vdev_id,
				  uint8_t dialogtoken,
				  uint16_t tid, uint16_t batimeout,
				  uint16_t buffersize,
				  uint16_t startseqnum)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->num_of_addba_req++;
	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
	     rx_tid->hw_qdesc_vaddr_unaligned)) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Rx Tid- %d hw qdesc is already setup",
			  __func__, tid);
	}

	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	dp_check_ba_buffersize(peer, tid, buffersize);

	if (dp_rx_tid_setup_wifi3(peer, tid,
				  rx_tid->ba_win_size, startseqnum)) {
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;

	rx_tid->dialogtoken = dialogtoken;
	rx_tid->startseqnum = startseqnum;

	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
		rx_tid->statuscode = rx_tid->userstatuscode;
	else
		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;

	qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
/*
 * dp_set_addba_response() - Set a user defined ADDBA response status code
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @statuscode: response status code to be set
 */
QDF_STATUS
dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);
	struct dp_rx_tid *rx_tid;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	rx_tid->userstatuscode = statuscode;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
/*
 * dp_delba_process_wifi3() - Process DELBA from peer
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @reasoncode: Reason code received in DELBA frame
 *
 * Return: 0 on success, error code on failure
 */
int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			   uint16_t vdev_id, int tid, uint16_t reasoncode)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_rx_tid *rx_tid;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		qdf_spin_unlock_bh(&rx_tid->tid_lock);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}
	/* TODO: See if we can delete the existing REO queue descriptor and
	 * replace with a new one without queue extension descriptor to save
	 * memory
	 */
	rx_tid->delba_rcode = reasoncode;
	rx_tid->num_of_delba_req++;
	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);

	rx_tid->ba_status = DP_RX_BA_INACTIVE;
	peer->active_ba_session_cnt--;
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
/*
 * dp_delba_tx_completion_wifi3() - Handle delba tx completion
 *
 * @cdp_soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @tid: TID number
 * @status: tx completion status
 * Return: 0 on success, error code on failure
 */
int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
				 uint16_t vdev_id,
				 uint8_t tid, int status)
{
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_rx_tid *rx_tid = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!", __func__);
		ret = QDF_STATUS_E_FAILURE;
		goto end;
	}
	rx_tid = &peer->rx_tid[tid];
	qdf_spin_lock_bh(&rx_tid->tid_lock);
	if (status) {
		rx_tid->delba_tx_fail_cnt++;
		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
			rx_tid->delba_tx_retry = 0;
			rx_tid->delba_tx_status = 0;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
		} else {
			rx_tid->delba_tx_retry++;
			rx_tid->delba_tx_status = 1;
			qdf_spin_unlock_bh(&rx_tid->tid_lock);
			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
					peer->vdev->pdev->soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw, tid,
					rx_tid->delba_rcode);
		}
		goto end;
	} else {
		rx_tid->delba_tx_success_cnt++;
		rx_tid->delba_tx_retry = 0;
		rx_tid->delba_tx_status = 0;
	}
	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
		peer->active_ba_session_cnt--;
	}
	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX);
		rx_tid->ba_status = DP_RX_BA_INACTIVE;
	}
	qdf_spin_unlock_bh(&rx_tid->tid_lock);

end:
	if (peer)
		dp_peer_unref_delete(peer);

	return ret;
}
/**
 * dp_set_pn_check_wifi3() - enable PN check in REO for security
 * @soc: Datapath soc handle
 * @vdev_id: id of datapath vdev
 * @peer_mac: Datapath peer mac address
 * @sec_type: security type
 * @rx_pn: Receive pn starting number
 *
 */
QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
		      uint32_t *rx_pn)
{
	struct dp_pdev *pdev;
	int i;
	uint8_t pn_size;
	struct hal_reo_cmd_params params;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id);
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (!vdev || !peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	pdev = vdev->pdev;
	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.u.upd_queue_params.update_pn_valid = 1;
	params.u.upd_queue_params.update_pn_size = 1;
	params.u.upd_queue_params.update_pn = 1;
	params.u.upd_queue_params.update_pn_check_needed = 1;
	params.u.upd_queue_params.update_svld = 1;
	params.u.upd_queue_params.svld = 0;

	peer->security[dp_sec_ucast].sec_type = sec_type;

	switch (sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 48;
		pn_size = 48;
		break;
	case cdp_sec_type_wapi:
		params.u.upd_queue_params.pn_check_needed = 1;
		params.u.upd_queue_params.pn_size = 128;
		pn_size = 128;
		if (vdev->opmode == wlan_op_mode_ap) {
			params.u.upd_queue_params.pn_even = 1;
			params.u.upd_queue_params.update_pn_even = 1;
		} else {
			params.u.upd_queue_params.pn_uneven = 1;
			params.u.upd_queue_params.update_pn_uneven = 1;
		}
		break;
	default:
		params.u.upd_queue_params.pn_check_needed = 0;
		pn_size = 0;
		break;
	}
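	/*
	 * pn_size here is in bits: 48 corresponds to the 6-byte PN used by
	 * TKIP/CCMP/GCMP, while 128 covers the 16-byte WAPI PN (which also
	 * needs the even/uneven initial-value handling above).
	 */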
  2720. for (i = 0; i < DP_MAX_TIDS; i++) {
  2721. struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
  2722. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2723. if (rx_tid->hw_qdesc_vaddr_unaligned) {
  2724. params.std.addr_lo =
  2725. rx_tid->hw_qdesc_paddr & 0xffffffff;
  2726. params.std.addr_hi =
  2727. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2728. if (pn_size) {
  2729. QDF_TRACE(QDF_MODULE_ID_DP,
  2730. QDF_TRACE_LEVEL_INFO_HIGH,
  2731. "%s PN set for TID:%d pn:%x:%x:%x:%x",
  2732. __func__, i, rx_pn[3], rx_pn[2],
  2733. rx_pn[1], rx_pn[0]);
  2734. params.u.upd_queue_params.update_pn_valid = 1;
  2735. params.u.upd_queue_params.pn_31_0 = rx_pn[0];
  2736. params.u.upd_queue_params.pn_63_32 = rx_pn[1];
  2737. params.u.upd_queue_params.pn_95_64 = rx_pn[2];
  2738. params.u.upd_queue_params.pn_127_96 = rx_pn[3];
  2739. }
  2740. rx_tid->pn_size = pn_size;
  2741. if (dp_reo_send_cmd(cdp_soc_t_to_dp_soc(soc),
  2742. CMD_UPDATE_RX_REO_QUEUE,
  2743. &params, dp_rx_tid_update_cb,
  2744. rx_tid)) {
  2745. dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
  2746. "tid %d desc %pK", rx_tid->tid,
  2747. (void *)(rx_tid->hw_qdesc_paddr));
  2748. DP_STATS_INC(cdp_soc_t_to_dp_soc(soc),
  2749. rx.err.reo_cmd_send_fail, 1);
  2750. }
  2751. } else {
  2752. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  2753. "PN Check not setup for TID :%d ", i);
  2754. }
  2755. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2756. }
  2757. fail:
  2758. if (peer)
  2759. dp_peer_unref_delete(peer);
  2760. return status;
  2761. }
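/*
 * Illustrative usage sketch, not driver code: how a caller could program
 * the starting 48-bit PN for a CCMP peer after key install.  The soc
 * handle, vdev id and peer mac are assumed to come from the caller, and
 * example_install_ccmp_pn() is a hypothetical name.
 */
#if 0
static void example_install_ccmp_pn(struct cdp_soc_t *soc, uint8_t vdev_id,
				    uint8_t *peer_mac)
{
	/* rx_pn[0] carries PN bits 31..0, rx_pn[3] carries bits 127..96 */
	uint32_t rx_pn[4] = { 1, 0, 0, 0 };

	if (dp_set_pn_check_wifi3(soc, vdev_id, peer_mac,
				  cdp_sec_type_aes_ccmp, rx_pn) !=
	    QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "failed to set PN check for peer");
}
#endif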
/**
 * dp_set_key_sec_type_wifi3() - set security mode of key
 * @soc: Datapath soc handle
 * @peer_mac: Datapath peer mac address
 * @vdev_id: id of datapath vdev
 * @sec_type: security type
 * @is_unicast: key type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
			  uint8_t *peer_mac, enum cdp_sec_type sec_type,
			  bool is_unicast)
{
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id);
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int sec_index;

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "key sec spec for peer %pK %pM: %s key of type %d",
		  peer,
		  peer->mac_addr.raw,
		  is_unicast ? "ucast" : "mcast",
		  sec_type);

	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
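/*
 * Illustrative sketch with a hypothetical caller: recording the cipher
 * used for the pairwise and group keys of a peer, mirroring what the
 * umac would pass down at key-install time.
 */
#if 0
static void example_set_key_types(struct cdp_soc_t *soc, uint8_t vdev_id,
				  uint8_t *peer_mac)
{
	/* unicast (pairwise) key: CCMP */
	dp_set_key_sec_type_wifi3(soc, vdev_id, peer_mac,
				  cdp_sec_type_aes_ccmp, true);
	/* multicast (group) key: GCMP-256 */
	dp_set_key_sec_type_wifi3(soc, vdev_id, peer_mac,
				  cdp_sec_type_aes_gcmp_256, false);
}
#endif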
void
dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
		      enum cdp_sec_type sec_type, int is_unicast,
		      u_int32_t *michael_key,
		      u_int32_t *rx_pn)
{
	struct dp_peer *peer;
	int sec_index;

	peer = dp_peer_find_by_id(soc, peer_id);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Couldn't find peer from ID %d - skipping security inits",
			  peer_id);
		return;
	}
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "sec spec for peer %pK %pM: %s key of type %d",
		  peer,
		  peer->mac_addr.raw,
		  is_unicast ? "ucast" : "mcast",
		  sec_type);
	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
#ifdef notyet /* TODO: See if this is required for defrag support */
	/* michael key only valid for TKIP, but for simplicity,
	 * copy it anyway
	 */
	qdf_mem_copy(
		&peer->security[sec_index].michael_key[0],
		michael_key,
		sizeof(peer->security[sec_index].michael_key));
#ifdef BIG_ENDIAN_HOST
	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
		     sizeof(peer->security[sec_index].michael_key));
#endif /* BIG_ENDIAN_HOST */
#endif
#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
	if (sec_type != cdp_sec_type_wapi) {
		qdf_mem_zero(peer->tids_last_pn_valid, DP_MAX_TIDS);
	} else {
		for (i = 0; i < DP_MAX_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			qdf_mem_copy(
				(u_int8_t *)&peer->tids_last_pn[i],
				(u_int8_t *)rx_pn, sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
		}
	}
#endif
	/* TODO: Update HW TID queue with PN check parameters (pn type for
	 * all security types and last pn for WAPI) once REO command API
	 * is available
	 */

	dp_peer_unref_del_find_by_id(peer);
}
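/*
 * Illustrative sketch with a hypothetical message handler: how a target
 * security indication could be fanned out to dp_rx_sec_ind_handler().
 * The struct example_sec_ind layout is an assumption for illustration,
 * not a real HTT message format.
 */
#if 0
struct example_sec_ind {
	uint16_t peer_id;
	enum cdp_sec_type sec_type;
	int is_unicast;
	u_int32_t michael_key[2];
	u_int32_t rx_pn[4];
};

static void example_handle_sec_ind(struct dp_soc *soc,
				   struct example_sec_ind *ind)
{
	dp_rx_sec_ind_handler(soc, ind->peer_id, ind->sec_type,
			      ind->is_unicast, ind->michael_key,
			      ind->rx_pn);
}
#endif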
#ifdef DP_PEER_EXTENDED_API
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	/* validate pdev before using it for the peer lookup */
	if (!pdev)
		return QDF_STATUS_E_FAULT;

	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev,
				    sta_desc->peer_addr.bytes);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, false);

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	      struct qdf_mac_addr peer_addr)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAULT;

	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_DISC;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_rx_flush_rx_cached(peer, true);

	return QDF_STATUS_SUCCESS;
}
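/*
 * Illustrative sketch with a hypothetical caller: the typical pairing of
 * dp_register_peer() on association and dp_clear_peer() on
 * disassociation.  sta_desc comes from the caller.
 */
#if 0
static void example_peer_lifecycle(struct cdp_soc_t *soc, uint8_t pdev_id,
				   struct ol_txrx_desc_type *sta_desc)
{
	if (dp_register_peer(soc, pdev_id, sta_desc) != QDF_STATUS_SUCCESS)
		return;

	/* ... peer is connected, datapath active ... */

	dp_clear_peer(soc, pdev_id, sta_desc->peer_addr);
}
#endif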
/**
 * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
 * @pdev_handle: data path device instance
 * @vdev_handle: virtual interface instance
 * @peer_addr: peer mac address
 *
 * Find peer by peer mac address within vdev
 *
 * Return: peer instance void pointer
 *         NULL cannot find target peer
 */
void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
				    struct cdp_vdev *vdev_handle,
				    uint8_t *peer_addr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, DP_VDEV_ALL);

	if (!peer)
		return NULL;

	if (peer->vdev != vdev) {
		dp_peer_unref_delete(peer);
		return NULL;
	}

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return peer;
}
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
				enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Failed to find peer for: [%pM]", peer_mac);
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;

	dp_info("peer %pK state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer =
		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	dp_info("peer %pK vdev %pK vdev id %d",
		peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer);

	return QDF_STATUS_SUCCESS;
}
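/*
 * Illustrative sketch with a hypothetical caller: resolving the vdev id
 * of a peer by mac address, e.g. to route an event to the right
 * interface.
 */
#if 0
static void example_lookup_vdev_id(struct cdp_soc_t *soc, uint8_t *peer_mac)
{
	uint8_t vdev_id;

	if (dp_get_vdevid(soc, peer_mac, &vdev_id) == QDF_STATUS_SUCCESS)
		dp_info("peer %pM is on vdev %d", peer_mac, vdev_id);
}
#endif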
struct cdp_vdev *
dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
			 struct qdf_mac_addr peer_addr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer = NULL;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for peer_addr: %pM",
			  peer_addr.bytes);
		return NULL;
	}

	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, peer_addr.bytes);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: %pM",
			  peer_addr.bytes);
		return NULL;
	}

	return (struct cdp_vdev *)peer->vdev;
}
/**
 * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
 * @peer_handle: peer instance
 *
 * Get the virtual interface instance to which the peer belongs
 *
 * Return: virtual interface instance pointer
 *         NULL in case cannot find
 */
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}
/**
 * dp_peer_get_peer_mac_addr() - Get peer mac address
 * @peer_handle: peer instance
 *
 * Get peer mac address
 *
 * Return: peer mac address pointer
 *         NULL in case cannot find
 */
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}
int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint8_t *peer_mac)
{
	enum ol_txrx_peer_state peer_state;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac, 0,
						      vdev_id);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_TRACE(DEBUG, "peer %pK state %d", peer, peer->state);
	peer_state = peer->state;
	dp_peer_unref_delete(peer);

	return peer_state;
}
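/*
 * Illustrative sketch with a hypothetical caller: driving a peer to the
 * authorized state and reading the state back.  Note dp_get_peer_state()
 * returns the ol_txrx_peer_state on success and a QDF error code when
 * the peer is not found.
 */
#if 0
static void example_authorize_peer(struct cdp_soc_t *soc, uint8_t vdev_id,
				   uint8_t *peer_mac)
{
	dp_peer_state_update(soc, peer_mac, OL_TXRX_PEER_STATE_AUTH);

	if (dp_get_peer_state(soc, vdev_id, peer_mac) ==
	    OL_TXRX_PEER_STATE_AUTH)
		dp_info("peer %pM authorized", peer_mac);
}
#endif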
/**
 * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
 * @pdev: data path device instance
 *
 * Allocate the local peer id pool for the physical device
 *
 * Return: none
 */
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	DP_TRACE(INFO, "Peer pool init");
}
/**
 * dp_local_peer_id_alloc() - allocate local peer id
 * @pdev: data path device instance
 * @peer: new peer instance
 *
 * Allocate a local peer id for the new peer
 *
 * Return: none
 */
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	dp_info("peer %pK, local id %d", peer, peer->local_id);
}
/**
 * dp_local_peer_id_free() - remove local peer id
 * @pdev: data path device instance
 * @peer: peer instance to be removed
 *
 * Return the local peer id of the peer to the pool
 *
 * Return: none
 */
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}
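/*
 * Illustrative sketch: the expected pairing of the local peer id calls.
 * The pool is initialized once per pdev; each peer takes an id at
 * create time and returns it at delete time.
 */
#if 0
static void example_local_id_usage(struct dp_pdev *pdev, struct dp_peer *peer)
{
	dp_local_peer_id_pool_init(pdev);	/* once, at pdev attach */

	dp_local_peer_id_alloc(pdev, peer);	/* at peer create */
	if (peer->local_id == OL_TXRX_INVALID_LOCAL_PEER_ID)
		return;				/* pool exhausted */

	dp_local_peer_id_free(pdev, peer);	/* at peer delete */
}
#endif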
bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
				uint8_t vdev_id, uint8_t *peer_addr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

	if (!vdev)
		return false;

	return !!dp_find_peer_by_addr_and_vdev(
					dp_pdev_to_cdp_pdev(vdev->pdev),
					dp_vdev_to_cdp_vdev(vdev),
					peer_addr);
}
bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint8_t *peer_addr,
				      uint16_t max_bssid)
{
	int i;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	for (i = 0; i < max_bssid; i++) {
		/* look up each candidate vdev by its own id, i */
		vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, i);
		/* Need to check vdevs other than the vdev_id */
		if (vdev_id == i || !vdev)
			continue;

		if (dp_find_peer_by_addr_and_vdev(
					dp_pdev_to_cdp_pdev(vdev->pdev),
					dp_vdev_to_cdp_vdev(vdev),
					peer_addr)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
				  "%s: Duplicate peer %pM already exist on vdev %d",
				  __func__, peer_addr, i);
			return true;
		}
	}

	return false;
}
bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint8_t *peer_addr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return false;

	return !!dp_find_peer_by_addr(dp_pdev_to_cdp_pdev(pdev), peer_addr);
}
#endif
/**
 * dp_peer_rxtid_stats() - Retrieve Rx TID (REO queue) stats from HW
 * @peer: DP peer handle
 * @dp_stats_cmd_cb: REO command callback function
 * @cb_ctxt: Callback context
 *
 * Return: count of tid stats cmd send succeeded
 */
int dp_peer_rxtid_stats(struct dp_peer *peer,
			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
			void *cb_ctxt)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;
	int i;
	int stats_cmd_sent_cnt = 0;
	QDF_STATUS status;

	if (!dp_stats_cmd_cb)
		return stats_cmd_sent_cnt;

	qdf_mem_zero(&params, sizeof(params));
	for (i = 0; i < DP_MAX_TIDS; i++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];

		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			params.std.need_status = 1;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;

			if (cb_ctxt) {
				status = dp_reo_send_cmd(
						soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						cb_ctxt);
			} else {
				status = dp_reo_send_cmd(
						soc, CMD_GET_QUEUE_STATS,
						&params, dp_stats_cmd_cb,
						rx_tid);
			}

			if (QDF_IS_STATUS_SUCCESS(status))
				stats_cmd_sent_cnt++;

			/* Flush REO descriptor from HW cache to update stats
			 * in descriptor memory. This is to help debugging
			 */
			qdf_mem_zero(&params, sizeof(params));
			params.std.need_status = 0;
			params.std.addr_lo =
				rx_tid->hw_qdesc_paddr & 0xffffffff;
			params.std.addr_hi =
				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
			params.u.fl_cache_params.flush_no_inval = 1;
			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
					NULL);
		}
	}

	return stats_cmd_sent_cnt;
}
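/*
 * Illustrative sketch with a hypothetical callback: the shape of a
 * dp_rxtid_stats_cmd_cb consumer.  The callback runs on REO command
 * completion with the queue-stats response; the specific status field
 * dereferenced here is an assumption for illustration only.
 */
#if 0
static void example_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
				   union hal_reo_status *reo_status)
{
	/* field name assumed for illustration */
	struct hal_reo_queue_status *st = &reo_status->queue_status;

	dp_info("rx tid queue stats: ssn %d", st->ssn);
}

static void example_dump_rxtid_stats(struct dp_peer *peer)
{
	/* NULL cb_ctxt: each callback receives its rx_tid as context */
	dp_peer_rxtid_stats(peer, example_rxtid_stats_cb, NULL);
}
#endif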
QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc,
		   uint8_t vdev_id,
		   uint8_t *peer_mac,
		   bool is_unicast, uint32_t *key)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	uint8_t sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	qdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     key, IEEE80211_WEP_MICLEN);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}
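/*
 * Illustrative sketch with a hypothetical caller: installing the TKIP
 * Michael key for the unicast key of a peer.  The key bytes shown are
 * placeholders; real key material comes from the key-install path.
 */
#if 0
static void example_install_michael_key(struct cdp_soc_t *soc,
					uint8_t vdev_id, uint8_t *peer_mac)
{
	/* IEEE80211_WEP_MICLEN (8 bytes) of placeholder key material */
	uint32_t mic_key[2] = { 0x01020304, 0x05060708 };

	dp_set_michael_key(soc, vdev_id, peer_mac, true, mic_key);
}
#endif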
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);

	if (peer) {
		/*
		 * Decrement the peer ref which is taken as part of
		 * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
		 */
		dp_peer_unref_del_find_by_id(peer);

		return true;
	}

	return false;
}