/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include <hal_api.h>
#include <hal_reo.h>
#ifdef CONFIG_MCL
#include <cds_ieee80211_common.h>
#include <cds_api.h>
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>

#ifdef DP_LFR
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Setting SSN valid bit to %d",
		  __func__, valid);
}
#else
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
}
#endif

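/*
 * dp_peer_find_mac_addr_cmp() - Compare two aligned MAC addresses
 * @mac_addr1: first MAC address
 * @mac_addr2: second MAC address
 *
 * Return: 0 if the addresses are equal, non-zero otherwise
 */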
static inline int dp_peer_find_mac_addr_cmp(
	union dp_align_mac_addr *mac_addr1,
	union dp_align_mac_addr *mac_addr2)
{
	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
		/*
		 * Intentionally use & rather than &&:
		 * because the operands are binary rather than generic boolean,
		 * the functionality is equivalent.
		 * Using && has the advantage of short-circuited evaluation,
		 * but using & has the advantage of no conditional branching,
		 * which is a more significant benefit.
		 */
		& (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
}

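/*
 * dp_peer_find_map_attach() - Allocate the peer ID -> peer object map
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */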
static int dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peers;
	/* allocate the peer ID -> peer object map */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
		  "\n<=== cfg max peer id %d ====>", max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: peer map memory allocation failed", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
	return 0; /* success */
}

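/*
 * dp_log2_ceil() - Compute ceil(log2(value))
 * @value: input value
 *
 * Return: the smallest n such that (1 << n) >= value
 */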
static int dp_log2_ceil(unsigned value)
{
	unsigned tmp = value;
	int log2 = -1;

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if ((1 << log2) != value)
		log2++;
	return log2;
}

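/*
 * dp_peer_find_add_id_to_obj() - Record a peer ID in the peer object
 * @peer: peer handle
 * @peer_id: peer ID to store in the first free slot
 *
 * Return: 0 on success, QDF_STATUS_E_FAILURE if all ID slots are in use
 */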
static int dp_peer_find_add_id_to_obj(
	struct dp_peer *peer,
	uint16_t peer_id)
{
	int i;

	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			return 0; /* success */
		}
	}
	return QDF_STATUS_E_FAILURE; /* failure */
}

#define DP_PEER_HASH_LOAD_MULT 2
#define DP_PEER_HASH_LOAD_SHIFT 0
#define DP_AST_HASH_LOAD_MULT 2
#define DP_AST_HASH_LOAD_SHIFT 0

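/*
 * dp_peer_find_hash_attach() - Allocate and initialize the peer MAC
 *	address -> peer object hash table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */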
static int dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	return 0;
}

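/*
 * dp_peer_find_hash_detach() - Free the peer hash table bins
 * @soc: SoC handle
 *
 * Return: None
 */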
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_hash.bins);
}

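/*
 * dp_peer_find_hash_index() - Compute the peer hash bin index by
 *	XOR-folding the MAC address down to idx_bits bits
 * @soc: SoC handle
 * @mac_addr: MAC address, in the 2-byte aligned representation
 *
 * Return: hash bin index
 */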
static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
	union dp_align_mac_addr *mac_addr)
{
	unsigned index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}

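/*
 * dp_peer_find_hash_add() - Add a peer object to the MAC address hash table
 * @soc: SoC handle
 * @peer: peer object to add
 *
 * Return: None
 */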
void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}

#ifdef FEATURE_AST
/*
 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
 * @soc: SoC handle
 *
 * Return: 0 on success, QDF_STATUS_E_NOMEM on allocation failure
 */
static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	hash_elems = ((soc->max_peers * DP_AST_HASH_LOAD_MULT) >>
		      DP_AST_HASH_LOAD_SHIFT);
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	/* allocate an array of TAILQ AST entry lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
						dp_ast_entry)));
	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return 0;
}

/*
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	/* Call the callback to free up the cookie */
	if (cb) {
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   soc,
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

/*
 * dp_peer_ast_hash_detach() - Free AST Hash table
 * @soc: SoC handle
 *
 * Return: None
 */
static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_ast_entry *ast, *ast_next;

	if (!soc->ast_hash.mask)
		return;

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
					   hash_list_elem, ast_next) {
				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
					     hash_list_elem);
				dp_peer_ast_cleanup(soc, ast);
				qdf_mem_free(ast);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_mem_free(soc->ast_hash.bins);
}

/*
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address, in the 2-byte aligned representation
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/*
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry to add
 *
 * This function adds the AST entry into the SoC AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
					struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
 * @soc: SoC handle
 * @ase: AST entry to remove
 *
 * This function removes the AST entry from the soc AST hash table.
 * It assumes the caller has taken the ast lock to protect access to this
 * table.
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_remove(struct dp_soc *soc,
					   struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check that the bin is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

/*
 * dp_peer_ast_list_find() - Find AST entry by MAC address from peer ast list
 * @soc: SoC handle
 * @peer: peer handle
 * @ast_mac_addr: mac address
 *
 * It assumes caller has taken the ast lock to protect the access to ast list
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_list_find(struct dp_soc *soc,
					   struct dp_peer *peer,
					   uint8_t *ast_mac_addr)
{
	struct dp_ast_entry *ast_entry = NULL;
	union dp_align_mac_addr *mac_addr =
		(union dp_align_mac_addr *)ast_mac_addr;

	TAILQ_FOREACH(ast_entry, &peer->ast_entry_list, ase_list_elem) {
		if (!dp_peer_find_mac_addr_cmp(mac_addr,
					       &ast_entry->mac_addr)) {
			return ast_entry;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address and
 *	pdev id
 * @soc: SoC handle
 * @ast_mac_addr: mac address
 * @pdev_id: pdev id
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((pdev_id == ase->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
 * @soc: SoC handle
 * @ast_mac_addr: mac address
 *
 * It assumes caller has taken the ast lock to protect the access to
 * AST hash table
 *
 * Return: AST entry
 */
struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, DP_MAC_ADDR_LEN);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

/*
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 *
 * Return: None
 */
static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;

	if (!peer)
		return;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, hw_peer_id, vdev_id, mac_addr[0],
		  mac_addr[1], mac_addr[2], mac_addr[3],
		  mac_addr[4], mac_addr[5]);

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_list_find(soc, peer, mac_addr);

	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
				soc->ctrl_psoc, peer->peer_ids[0],
				hw_peer_id, vdev_id,
				mac_addr, peer_type, ast_hash);
		}
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "AST entry not found");
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

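/*
 * dp_peer_free_hmwds_cb() - AST free callback that re-adds an HM WDS entry
 *	once delete confirmation (CDP_TXRX_AST_DELETED) is received from
 *	target
 * @ctrl_psoc: control path SoC handle
 * @dp_soc: DP SoC handle
 * @cookie: dp_ast_free_cb_params describing the entry to re-add
 * @status: AST free status
 *
 * Return: None
 */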
void dp_peer_free_hmwds_cb(void *ctrl_psoc,
			   void *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id);
	if (peer) {
		dp_peer_add_ast(soc, peer,
				&param->mac_addr.raw[0],
				param->type,
				param->flags);
		dp_peer_unref_delete(peer);
	}
	qdf_mem_free(cookie);
}

/*
 * dp_peer_add_ast() - Allocate and add AST entry into peer list
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @type: AST entry type
 * @flags: wds or hmwds flags
 *
 * This API is used by WDS source port learning function to
 * add a new AST entry into peer AST list
 *
 * Return: 0 if new entry is allocated,
 *         -1 if entry add failed
 */
int dp_peer_add_ast(struct dp_soc *soc,
		    struct dp_peer *peer,
		    uint8_t *mac_addr,
		    enum cdp_txrx_ast_entry_type type,
		    uint32_t flags)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_pdev *pdev = NULL;
	uint8_t next_node_mac[6];
	int ret = -1;
	txrx_ast_free_cb cb = NULL;
	void *cookie = NULL;

	if (peer->delete_in_progress)
		return ret;

	vdev = peer->vdev;
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer's vdev is NULL"));
		QDF_ASSERT(0);
		return ret;
	}

	pdev = vdev->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: %pM peer: %pK mac %pM",
		  __func__, pdev->pdev_id, vdev->vdev_id, type, flags,
		  peer->mac_addr.raw, peer, mac_addr);

	qdf_spin_lock_bh(&soc->ast_lock);

	/* If an AST entry already exists, just return from here.
	 * AST entries with the same mac address can exist on different
	 * radios; if ast_override support is enabled, search by pdev in
	 * this case.
	 */
	if (soc->ast_override_support) {
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
							    pdev->pdev_id);
		if (ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			return 0;
		}
	} else {
		/* WDS_HM_SEC entries can be added for the same mac address;
		 * do not check for an existing entry
		 */
		if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
			goto add_ast_entry;

		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);

		if (ast_entry) {
			if ((type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->type == CDP_TXRX_AST_TYPE_MEC))
				ast_entry->is_active = TRUE;

			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    !ast_entry->delete_in_progress) {
				qdf_spin_unlock_bh(&soc->ast_lock);
				return 0;
			}

			/* An add for an HMWDS entry cannot be ignored if an
			 * AST entry with the same mac address already exists.
			 *
			 * If an ast entry exists with the requested mac
			 * address, send a delete command and register a
			 * callback which takes care of adding the HMWDS ast
			 * entry on delete confirmation from target.
			 */
			if ((type == CDP_TXRX_AST_TYPE_WDS_HM) &&
			    soc->is_peer_map_unmap_v2) {
				struct dp_ast_free_cb_params *param = NULL;

				if (ast_entry->type ==
					CDP_TXRX_AST_TYPE_WDS_HM_SEC)
					goto add_ast_entry;

				/* save existing callback */
				if (ast_entry->callback) {
					cb = ast_entry->callback;
					cookie = ast_entry->cookie;
				}

				param = qdf_mem_malloc(sizeof(*param));
				if (!param) {
					QDF_TRACE(QDF_MODULE_ID_TXRX,
						  QDF_TRACE_LEVEL_ERROR,
						  "Allocation failed");
					qdf_spin_unlock_bh(&soc->ast_lock);
					return ret;
				}

				qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
					     DP_MAC_ADDR_LEN);
				qdf_mem_copy(&param->peer_mac_addr.raw[0],
					     &peer->mac_addr.raw[0],
					     DP_MAC_ADDR_LEN);
				param->type = type;
				param->flags = flags;
				param->vdev_id = vdev->vdev_id;
				ast_entry->callback = dp_peer_free_hmwds_cb;
				ast_entry->cookie = (void *)param;
				if (!ast_entry->delete_in_progress)
					dp_peer_del_ast(soc, ast_entry);
			}

			/* Modify an already existing AST entry from type
			 * WDS to MEC on promotion. This serves as a fix when
			 * backbone interfaces are interchanged, wherein the
			 * wds entry becomes its own MEC. The entry should be
			 * replaced only when the ast_entry peer matches the
			 * peer received in the mec event. This additional
			 * check is needed in wds repeater cases, where a
			 * multicast packet from station to the root via the
			 * repeater should not remove the wds entry.
			 */
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
			    (type == CDP_TXRX_AST_TYPE_MEC) &&
			    (ast_entry->peer == peer)) {
				ast_entry->is_active = FALSE;
				dp_peer_del_ast(soc, ast_entry);
			}
			qdf_spin_unlock_bh(&soc->ast_lock);

			/* Call the saved callback */
			if (cb) {
				cb(soc->ctrl_psoc, soc, cookie,
				   CDP_TXRX_AST_DELETE_IN_PROGRESS);
			}
			return 0;
		}
	}

add_ast_entry:
	ast_entry = (struct dp_ast_entry *)
		qdf_mem_malloc(sizeof(struct dp_ast_entry));

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("fail to allocate ast_entry"));
		QDF_ASSERT(0);
		return ret;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, DP_MAC_ADDR_LEN);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->vdev_id = vdev->vdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_SELF:
		peer->self_ast_entry = ast_entry;
		ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
		break;
	case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
		break;
	case CDP_TXRX_AST_TYPE_MEC:
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_MEC;
		break;
	case CDP_TXRX_AST_TYPE_DA:
		peer = peer->vdev->vap_bss_peer;
		ast_entry->next_hop = 1;
		ast_entry->type = CDP_TXRX_AST_TYPE_DA;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Incorrect AST entry type"));
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	dp_peer_ast_hash_add(soc, ast_entry);
	ast_entry->peer = peer;

	if (type == CDP_TXRX_AST_TYPE_MEC)
		qdf_mem_copy(next_node_mac, peer->vdev->mac_addr.raw, 6);
	else
		qdf_mem_copy(next_node_mac, peer->mac_addr.raw, 6);

	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
	qdf_spin_unlock_bh(&soc->ast_lock);

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)) {
		if (QDF_STATUS_SUCCESS ==
			soc->cdp_soc.ol_ops->peer_add_wds_entry(
				peer->vdev->osif_vdev,
				(struct cdp_peer *)peer,
				mac_addr,
				next_node_mac,
				flags))
			return 0;
	}

	return ret;
}

/*
 * dp_peer_del_ast() - Delete and free AST entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function removes the AST entry from peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: None
 */
void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	uint16_t peer_id = peer->peer_ids[0];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: %pM next_hop: %u peer_mac: %pM\n",
		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
		  peer->vdev->vdev_id, ast_entry->mac_addr.raw,
		  ast_entry->next_hop, ast_entry->peer->mac_addr.raw);

	dp_peer_ast_send_wds_del(soc, ast_entry);

	/*
	 * If peer map v2 is enabled, we do not free the ast entry here;
	 * it is supposed to be freed in the unmap event (after we receive
	 * delete confirmation from target).
	 *
	 * If peer_id is invalid, we did not get the peer map event for
	 * the peer; free the ast entry from here only in that case.
	 */
	if (soc->is_peer_map_unmap_v2 && (peer_id != HTT_INVALID_PEER)) {
		/*
		 * For HM_SEC and SELF type we do not receive an unmap
		 * event; free ast_entry from here itself
		 */
		if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
		    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
			return;
	}

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;
	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);
	dp_peer_ast_cleanup(soc, ast_entry);
	qdf_mem_free(ast_entry);
}

/*
 * dp_peer_update_ast() - Update the AST entry to the roamed peer
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry of the node
 * @flags: wds or hmwds
 *
 * This function updates the AST entry to the roamed peer and soc tables.
 * It assumes the caller has taken the ast lock to protect access to these
 * tables.
 *
 * Return: 0 if ast entry is updated successfully
 *         -1 failure
 */
int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	int ret = -1;
	struct dp_peer *old_peer;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: %pM peer_mac: %pM\n",
		  __func__, ast_entry->type, peer->vdev->pdev->pdev_id,
		  peer->vdev->vdev_id, flags, ast_entry->mac_addr.raw,
		  peer->mac_addr.raw);

	if (ast_entry->delete_in_progress)
		return ret;

	if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
	    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
		return 0;

	old_peer = ast_entry->peer;
	TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);

	ast_entry->peer = peer;
	ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
	ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
	ast_entry->vdev_id = peer->vdev->vdev_id;
	ast_entry->is_active = TRUE;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);

	ret = soc->cdp_soc.ol_ops->peer_update_wds_entry(
			peer->vdev->osif_vdev,
			ast_entry->mac_addr.raw,
			peer->mac_addr.raw,
			flags);

	return ret;
}

/*
 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the pdev_id from the ast entry.
 *
 * Return: (uint8_t) pdev_id
 */
uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return ast_entry->pdev_id;
}

/*
 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 *
 * This function gets the next hop from the ast entry.
 *
 * Return: (uint8_t) next_hop
 */
uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return ast_entry->next_hop;
}

/*
 * dp_peer_ast_set_type() - set type in the ast entry
 * @soc: SoC handle
 * @ast_entry: AST entry of the node
 * @type: AST entry type to set
 *
 * This function sets the type in the ast entry.
 *
 * Return: None
 */
void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
	ast_entry->type = type;
}

#else
int dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
		    uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		    uint32_t flags)
{
	return 1;
}

void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
{
}

int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
		       struct dp_ast_entry *ast_entry, uint32_t flags)
{
	return 1;
}

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	return NULL;
}

static int dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	return 0;
}

static inline void dp_peer_map_ast(struct dp_soc *soc,
	struct dp_peer *peer, uint8_t *mac_addr, uint16_t hw_peer_id,
	uint8_t vdev_id, uint16_t ast_hash)
{
}

static void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
}

void dp_peer_ast_set_type(struct dp_soc *soc,
			  struct dp_ast_entry *ast_entry,
			  enum cdp_txrx_ast_entry_type type)
{
}

uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
				struct dp_ast_entry *ast_entry)
{
	return 0xff;
}

uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
				 struct dp_ast_entry *ast_entry)
{
	return 0xff;
}
#endif

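/*
 * dp_peer_ast_send_wds_del() - Send a WDS AST delete command to target and
 *	mark the entry as delete-in-progress
 * @soc: SoC handle
 * @ast_entry: AST entry to delete
 *
 * Return: None
 */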
void dp_peer_ast_send_wds_del(struct dp_soc *soc,
			      struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	struct cdp_soc_t *cdp_soc = &soc->cdp_soc;

	if (ast_entry->delete_in_progress)
		return;

	if (ast_entry->next_hop &&
	    ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC)
		cdp_soc->ol_ops->peer_del_wds_entry(peer->vdev->osif_vdev,
						    ast_entry->mac_addr.raw);

	ast_entry->delete_in_progress = true;
}

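/*
 * dp_peer_ast_free_entry() - Remove an AST entry from the peer list and
 *	hash table, invoke its free callback (if any) with
 *	CDP_TXRX_AST_DELETED, and free the memory
 * @soc: SoC handle
 * @ast_entry: AST entry to free
 *
 * Return: None
 */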
static void dp_peer_ast_free_entry(struct dp_soc *soc,
				   struct dp_ast_entry *ast_entry)
{
	struct dp_peer *peer = ast_entry->peer;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;

	/*
	 * release the reference only if it is mapped
	 * to ast_table
	 */

	qdf_spin_lock_bh(&soc->ast_lock);
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	if (ast_entry == peer->self_ast_entry)
		peer->self_ast_entry = NULL;

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   soc,
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);
}

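/*
 * dp_peer_find_hash_find() - Look up a peer by MAC address (and optionally
 *	vdev id) and take a reference on it
 * @soc: SoC handle
 * @peer_mac_addr: peer MAC address
 * @mac_addr_is_aligned: set if peer_mac_addr is already 2-byte aligned
 * @vdev_id: vdev id to match, or DP_VDEV_ALL to match any vdev
 *
 * The caller is responsible for releasing the reference via
 * dp_peer_unref_delete().
 *
 * Return: peer object, or NULL if not found
 */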
struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_peer *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, DP_MAC_ADDR_LEN);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
#if ATH_SUPPORT_WRAP
		/* ProxySTA may have multiple BSS peers with the same MAC
		 * address; the modified find takes care of finding the
		 * correct BSS peer.
		 */
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    ((peer->vdev->vdev_id == vdev_id) ||
		     (vdev_id == DP_VDEV_ALL))) {
#else
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
#endif
			/* found it - increment the ref count before releasing
			 * the lock
			 */
			qdf_atomic_inc(&peer->ref_cnt);
			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
	return NULL; /* failure */
}

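/*
 * dp_peer_find_hash_remove() - Remove a peer object from the MAC address
 *	hash table
 * @soc: SoC handle
 * @peer: peer object to remove
 *
 * The caller must hold peer_ref_mutex; see the comment in the body.
 *
 * Return: None
 */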
void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check that the bin is not empty before deleting */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
}

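/*
 * dp_peer_find_hash_erase() - Release the reference held for every peer
 *	still in the hash table, so each remaining peer gets deleted
 * @soc: SoC handle
 *
 * Called when the soc is being torn down and is no longer in use.
 *
 * Return: None
 */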
void dp_peer_find_hash_erase(struct dp_soc *soc)
{
	int i;

	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the soc is no longer in use.
	 */
	for (i = 0; i <= soc->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
			struct dp_peer *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
					   hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * dp_peer_unref_delete.
				 */
				/* set to zero */
				qdf_atomic_init(&peer->ref_cnt);
				/* incr to one */
				qdf_atomic_inc(&peer->ref_cnt);
				dp_peer_unref_delete(peer);
			}
		}
	}
}

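/*
 * dp_peer_find_map_detach() - Free the peer ID -> peer object map
 * @soc: SoC handle
 *
 * Return: None
 */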
static void dp_peer_find_map_detach(struct dp_soc *soc)
{
	qdf_mem_free(soc->peer_id_to_obj_map);
}

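/*
 * dp_peer_find_attach() - Allocate the peer ID map, peer hash table, and
 *	AST hash table, unwinding the earlier allocations on failure
 * @soc: SoC handle
 *
 * Return: 0 on success, 1 on failure
 */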
int dp_peer_find_attach(struct dp_soc *soc)
{
	if (dp_peer_find_map_attach(soc))
		return 1;

	if (dp_peer_find_hash_attach(soc)) {
		dp_peer_find_map_detach(soc);
		return 1;
	}

	if (dp_peer_ast_hash_attach(soc)) {
		dp_peer_find_hash_detach(soc);
		dp_peer_find_map_detach(soc);
		return 1;
	}

	return 0; /* success */
}

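/*
 * dp_rx_tid_stats_cb() - REO command callback that prints the REO queue
 *	status for a TID, along with host-maintained BA session counters
 * @soc: SoC handle
 * @cb_ctxt: callback context (the dp_rx_tid being queried)
 * @reo_status: REO command status returned by hardware
 *
 * Return: None
 */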
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_TRACE_STATS(FATAL, "REO stats failure %d for TID %d\n",
			       queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_TRACE_STATS(FATAL, "REO queue stats (TID: %d):\n"
		       "ssn: %d\n"
		       "curr_idx  : %d\n"
		       "pn_31_0   : %08x\n"
		       "pn_63_32  : %08x\n"
		       "pn_95_64  : %08x\n"
		       "pn_127_96 : %08x\n"
		       "last_rx_enq_tstamp : %08x\n"
		       "last_rx_deq_tstamp : %08x\n"
		       "rx_bitmap_31_0     : %08x\n"
		       "rx_bitmap_63_32    : %08x\n"
		       "rx_bitmap_95_64    : %08x\n"
		       "rx_bitmap_127_96   : %08x\n"
		       "rx_bitmap_159_128  : %08x\n"
		       "rx_bitmap_191_160  : %08x\n"
		       "rx_bitmap_223_192  : %08x\n"
		       "rx_bitmap_255_224  : %08x\n",
		       rx_tid->tid,
		       queue_status->ssn, queue_status->curr_idx,
		       queue_status->pn_31_0, queue_status->pn_63_32,
		       queue_status->pn_95_64, queue_status->pn_127_96,
		       queue_status->last_rx_enq_tstamp,
		       queue_status->last_rx_deq_tstamp,
		       queue_status->rx_bitmap_31_0,
		       queue_status->rx_bitmap_63_32,
		       queue_status->rx_bitmap_95_64,
		       queue_status->rx_bitmap_127_96,
		       queue_status->rx_bitmap_159_128,
		       queue_status->rx_bitmap_191_160,
		       queue_status->rx_bitmap_223_192,
		       queue_status->rx_bitmap_255_224);

	DP_TRACE_STATS(FATAL,
		       "curr_mpdu_cnt      : %d\n"
		       "curr_msdu_cnt      : %d\n"
		       "fwd_timeout_cnt    : %d\n"
		       "fwd_bar_cnt        : %d\n"
		       "dup_cnt            : %d\n"
		       "frms_in_order_cnt  : %d\n"
		       "bar_rcvd_cnt       : %d\n"
		       "mpdu_frms_cnt      : %d\n"
		       "msdu_frms_cnt      : %d\n"
		       "total_byte_cnt     : %d\n"
		       "late_recv_mpdu_cnt : %d\n"
		       "win_jump_2k        : %d\n"
		       "hole_cnt           : %d\n",
		       queue_status->curr_mpdu_cnt, queue_status->curr_msdu_cnt,
		       queue_status->fwd_timeout_cnt, queue_status->fwd_bar_cnt,
		       queue_status->dup_cnt, queue_status->frms_in_order_cnt,
		       queue_status->bar_rcvd_cnt, queue_status->mpdu_frms_cnt,
		       queue_status->msdu_frms_cnt, queue_status->total_cnt,
		       queue_status->late_recv_mpdu_cnt,
		       queue_status->win_jump_2k, queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req          : %d\n"
		       "Addba Resp         : %d\n"
		       "Addba Resp success : %d\n"
		       "Addba Resp failed  : %d\n"
		       "Delba Req received : %d\n"
		       "Delba Tx success   : %d\n"
		       "Delba Tx Fail      : %d\n"
		       "BA window size     : %d\n"
		       "Pn size            : %d\n",
		       rx_tid->num_of_addba_req,
		       rx_tid->num_of_addba_resp,
		       rx_tid->num_addba_rsp_success,
		       rx_tid->num_addba_rsp_failed,
		       rx_tid->num_of_delba_req,
		       rx_tid->delba_tx_success_cnt,
		       rx_tid->delba_tx_fail_cnt,
		       rx_tid->ba_win_size,
		       rx_tid->pn_size);
}

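/*
 * dp_peer_find_add_id() - Associate a firmware-assigned peer ID with an
 *	existing peer object and record the ID in the peer_id_to_obj map
 * @soc: SoC handle
 * @peer_mac_addr: MAC address of the peer
 * @peer_id: peer ID assigned by firmware
 * @hw_peer_id: AST index for this peer
 * @vdev_id: vdev id of the VAP to which the peer belongs
 *
 * Return: peer object with an incremented reference count, or NULL if no
 *	   peer with this MAC address exists
 */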
static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id)
{
	struct dp_peer *peer;

	QDF_ASSERT(peer_id <= soc->max_peers);
	/* check if there's already a peer object with this MAC address */
	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
				      0 /* mac addr is not aligned */,
				      vdev_id);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
		  __func__, peer, peer_id, vdev_id, peer_mac_addr[0],
		  peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
		  peer_mac_addr[4], peer_mac_addr[5]);

	if (peer) {
		/* peer's ref count was already incremented by
		 * peer_find_hash_find
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
			  "%s: ref_cnt: %d", __func__,
			  qdf_atomic_read(&peer->ref_cnt));
		if (!soc->peer_id_to_obj_map[peer_id]) {
			soc->peer_id_to_obj_map[peer_id] = peer;
		} else {
			/* Peer map event came for peer_id which
			 * is already mapped, this is not expected
			 */
			QDF_ASSERT(0);
		}

		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
			/* TBDXXX: assert for now */
			QDF_ASSERT(0);
		}

		return peer;
	}

	return NULL;
}

/**
 * dp_rx_peer_map_handler() - handle peer map event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @hw_peer_id - ast index for this peer
 * @vdev_id - vdev ID
 * @peer_mac_addr - mac address of the peer
 * @ast_hash - ast hash value
 * @is_wds - flag to indicate peer map event for WDS ast entry
 *
 * associate the peer_id that firmware provided with peer entry
 * and update the ast table in the host with the hw_peer_id.
 *
 * Return: none
 */
void
dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
		       uint16_t hw_peer_id, uint8_t vdev_id,
		       uint8_t *peer_mac_addr, uint16_t ast_hash,
		       uint8_t is_wds)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_peer *peer = NULL;
	enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		  "peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "
		  "%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
		  hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
		  peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
		  peer_mac_addr[5], vdev_id);

	/* hw_peer_id is unsigned, so only the upper bound needs checking */
	if (hw_peer_id > (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "invalid hw_peer_id: %d", hw_peer_id);
		qdf_assert_always(0);
	}

	/* For a peer map event for a WDS ast entry, get the peer from
	 * the obj map
	 */
	if (is_wds) {
		peer = soc->peer_id_to_obj_map[peer_id];
	} else {
		peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
					   hw_peer_id, vdev_id);

		if (peer) {
			/*
			 * For every peer map message, check and set bss_peer
			 */
			if (!(qdf_mem_cmp(peer->mac_addr.raw,
					  peer->vdev->mac_addr.raw,
					  DP_MAC_ADDR_LEN))) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "vdev bss_peer!!!!");
				peer->bss_peer = 1;
				peer->vdev->vap_bss_peer = peer;
			}

			if (peer->vdev->opmode == wlan_op_mode_sta)
				peer->vdev->bss_ast_hash = ast_hash;

			/* Add an ast entry in case the self ast entry was
			 * deleted due to a DP CP sync issue.
			 *
			 * self_ast_entry is modified in the peer create
			 * and peer unmap paths, which cannot run in
			 * parallel with peer map; no lock is needed before
			 * referring to it.
			 */
			if (!peer->self_ast_entry) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO_HIGH,
					  "Add self ast from map %pM",
					  peer_mac_addr);
				dp_peer_add_ast(soc, peer,
						peer_mac_addr,
						type, 0);
			}
		}
	}

	dp_peer_map_ast(soc, peer, peer_mac_addr,
			hw_peer_id, vdev_id, ast_hash);
}

/**
 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
 * @soc_handle - generic soc handle
 * @peer_id - peer_id from firmware
 * @vdev_id - vdev ID
 * @mac_addr - mac address of the peer or wds entry
 * @is_wds - flag to indicate peer unmap event for WDS ast entry
 *
 * Return: none
 */
  1237. void
  1238. dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id,
  1239. uint8_t vdev_id, uint8_t *mac_addr,
  1240. uint8_t is_wds)
  1241. {
  1242. struct dp_peer *peer;
  1243. struct dp_ast_entry *ast_entry;
  1244. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  1245. uint8_t i;
  1246. peer = __dp_peer_find_by_id(soc, peer_id);
  1247. /*
  1248. * Currently peer IDs are assigned for vdevs as well as peers.
  1249. * If the peer ID is for a vdev, then the peer pointer stored
  1250. * in peer_id_to_obj_map will be NULL.
  1251. */
  1252. if (!peer) {
  1253. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1254. "%s: Received unmap event for invalid peer_id"
  1255. " %u", __func__, peer_id);
  1256. return;
  1257. }
  1258. /* If V2 Peer map messages are enabled AST entry has to be freed here
  1259. */
  1260. if (soc->is_peer_map_unmap_v2) {
  1261. qdf_spin_lock_bh(&soc->ast_lock);
  1262. ast_entry = dp_peer_ast_list_find(soc, peer,
  1263. mac_addr);
  1264. if (!ast_entry) {
1265. /* in case of qwrap we have multiple BSS peers
1266. * with the same mac address
  1267. *
  1268. * AST entry for this mac address will be created
  1269. * only for one peer
  1270. */
  1271. if (peer->vdev->proxysta_vdev) {
  1272. qdf_spin_unlock_bh(&soc->ast_lock);
  1273. goto peer_unmap;
  1274. }
1275. /* Ideally we should not enter this case, where the
1276. * ast_entry is not present in the host table and
1277. * we received an unmap event
1278. */
  1279. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
  1280. "%s:%d AST entry not found with peer %pK peer_id %u peer_mac %pM mac_addr %pM vdev_id %u next_hop %u\n",
  1281. __func__, __LINE__, peer, peer->peer_ids[0],
  1282. peer->mac_addr.raw, mac_addr, vdev_id,
  1283. is_wds);
  1284. if (!is_wds) {
  1285. qdf_spin_unlock_bh(&soc->ast_lock);
  1286. goto peer_unmap;
  1287. }
  1288. }
  1289. qdf_spin_unlock_bh(&soc->ast_lock);
1290. /* Free the AST entry if delete_in_progress is set;
1291. * otherwise it is reused
1292. */
1293. if (ast_entry && ast_entry->delete_in_progress)
1294. dp_peer_ast_free_entry(soc, ast_entry);
  1295. if (is_wds)
  1296. return;
  1297. }
  1298. peer_unmap:
  1299. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  1300. "peer_unmap_event (soc:%pK) peer_id %d peer %pK",
  1301. soc, peer_id, peer);
  1302. soc->peer_id_to_obj_map[peer_id] = NULL;
  1303. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
  1304. if (peer->peer_ids[i] == peer_id) {
  1305. peer->peer_ids[i] = HTT_INVALID_PEER;
  1306. break;
  1307. }
  1308. }
  1309. if (soc->cdp_soc.ol_ops->peer_unmap_event) {
  1310. soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
  1311. peer_id);
  1312. }
  1313. /*
  1314. * Remove a reference to the peer.
  1315. * If there are no more references, delete the peer object.
  1316. */
  1317. dp_peer_unref_delete(peer);
  1318. }
  1319. void
  1320. dp_peer_find_detach(struct dp_soc *soc)
  1321. {
  1322. dp_peer_find_map_detach(soc);
  1323. dp_peer_find_hash_detach(soc);
  1324. dp_peer_ast_hash_detach(soc);
  1325. }
  1326. static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
  1327. union hal_reo_status *reo_status)
  1328. {
  1329. struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
  1330. if ((reo_status->rx_queue_status.header.status !=
  1331. HAL_REO_CMD_SUCCESS) &&
  1332. (reo_status->rx_queue_status.header.status !=
  1333. HAL_REO_CMD_DRAIN)) {
  1334. /* Should not happen normally. Just print error for now */
  1335. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1336. "%s: Rx tid HW desc update failed(%d): tid %d",
  1337. __func__,
  1338. reo_status->rx_queue_status.header.status,
  1339. rx_tid->tid);
  1340. }
  1341. }
  1342. /*
  1343. * dp_find_peer_by_addr - find peer instance by mac address
  1344. * @dev: physical device instance
  1345. * @peer_mac_addr: peer mac address
  1346. * @local_id: local id for the peer
  1347. *
  1348. * Return: peer instance pointer
  1349. */
  1350. void *dp_find_peer_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
  1351. uint8_t *local_id)
  1352. {
  1353. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  1354. struct dp_peer *peer;
  1355. peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
  1356. if (!peer)
  1357. return NULL;
1358. /* Multiple peer ids? How can we know which peer id? */
  1359. *local_id = peer->local_id;
  1360. DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
  1361. /* ref_cnt is incremented inside dp_peer_find_hash_find().
  1362. * Decrement it here.
  1363. */
  1364. dp_peer_unref_delete(peer);
  1365. return peer;
  1366. }
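/*
 * Example: a minimal, hypothetical usage sketch for dp_find_peer_by_addr().
 * Note that the reference taken inside dp_peer_find_hash_find() is dropped
 * before returning, so the returned pointer is only safe while the caller
 * otherwise guarantees the peer stays alive.
 *
 *	uint8_t local_id;
 *	struct dp_peer *peer;
 *
 *	peer = dp_find_peer_by_addr((struct cdp_pdev *)pdev, mac, &local_id);
 *	if (peer)
 *		DP_TRACE(INFO, "peer %pK id %d", peer, local_id);
 */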
  1367. /*
1368. * dp_rx_tid_update_wifi3() - Update receive TID state
  1369. * @peer: Datapath peer handle
  1370. * @tid: TID
  1371. * @ba_window_size: BlockAck window size
  1372. * @start_seq: Starting sequence number
  1373. *
  1374. * Return: 0 on success, error code on failure
  1375. */
  1376. static int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
  1377. ba_window_size, uint32_t start_seq)
  1378. {
  1379. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  1380. struct dp_soc *soc = peer->vdev->pdev->soc;
  1381. struct hal_reo_cmd_params params;
  1382. qdf_mem_zero(&params, sizeof(params));
  1383. params.std.need_status = 1;
  1384. params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
  1385. params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  1386. params.u.upd_queue_params.update_ba_window_size = 1;
  1387. params.u.upd_queue_params.ba_window_size = ba_window_size;
  1388. if (start_seq < IEEE80211_SEQ_MAX) {
  1389. params.u.upd_queue_params.update_ssn = 1;
  1390. params.u.upd_queue_params.ssn = start_seq;
  1391. }
  1392. dp_set_ssn_valid_flag(&params, 0);
  1393. dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params, dp_rx_tid_update_cb, rx_tid);
  1394. rx_tid->ba_win_size = ba_window_size;
  1395. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  1396. soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  1397. peer->vdev->pdev->ctrl_pdev,
  1398. peer->vdev->vdev_id, peer->mac_addr.raw,
  1399. rx_tid->hw_qdesc_paddr, tid, tid, 1, ba_window_size);
  1400. }
  1401. return 0;
  1402. }
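/*
 * The addr_lo/addr_hi split above recurs for every REO command built in
 * this file. A minimal sketch of the pattern as a helper; the helper name
 * is hypothetical, not part of the driver API:
 *
 *	static inline void reo_cmd_set_qdesc_addr(struct hal_reo_cmd_params *p,
 *						  qdf_dma_addr_t paddr)
 *	{
 *		p->std.addr_lo = paddr & 0xffffffff;
 *		p->std.addr_hi = (uint64_t)paddr >> 32;
 *	}
 */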
  1403. /*
  1404. * dp_reo_desc_free() - Callback free reo descriptor memory after
  1405. * HW cache flush
  1406. *
  1407. * @soc: DP SOC handle
  1408. * @cb_ctxt: Callback context
  1409. * @reo_status: REO command status
  1410. */
  1411. static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
  1412. union hal_reo_status *reo_status)
  1413. {
  1414. struct reo_desc_list_node *freedesc =
  1415. (struct reo_desc_list_node *)cb_ctxt;
  1416. struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
  1417. if ((reo_status->fl_cache_status.header.status !=
  1418. HAL_REO_CMD_SUCCESS) &&
  1419. (reo_status->fl_cache_status.header.status !=
  1420. HAL_REO_CMD_DRAIN)) {
  1421. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1422. "%s: Rx tid HW desc flush failed(%d): tid %d",
  1423. __func__,
1424. reo_status->fl_cache_status.header.status,
  1425. freedesc->rx_tid.tid);
  1426. }
  1427. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1428. "%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
  1429. (void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
  1430. qdf_mem_unmap_nbytes_single(soc->osdev,
  1431. rx_tid->hw_qdesc_paddr,
  1432. QDF_DMA_BIDIRECTIONAL,
  1433. rx_tid->hw_qdesc_alloc_size);
  1434. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1435. qdf_mem_free(freedesc);
  1436. }
  1437. #if defined(QCA_WIFI_QCA8074) && defined(BUILD_X86)
  1438. /* Hawkeye emulation requires bus address to be >= 0x50000000 */
  1439. static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
  1440. {
  1441. if (dma_addr < 0x50000000)
  1442. return QDF_STATUS_E_FAILURE;
  1443. else
  1444. return QDF_STATUS_SUCCESS;
  1445. }
  1446. #else
  1447. static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
  1448. {
  1449. return QDF_STATUS_SUCCESS;
  1450. }
  1451. #endif
  1452. /*
1453. * dp_rx_tid_setup_wifi3() - Setup receive TID state
  1454. * @peer: Datapath peer handle
  1455. * @tid: TID
  1456. * @ba_window_size: BlockAck window size
  1457. * @start_seq: Starting sequence number
  1458. *
  1459. * Return: 0 on success, error code on failure
  1460. */
  1461. int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
  1462. uint32_t ba_window_size, uint32_t start_seq)
  1463. {
  1464. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  1465. struct dp_vdev *vdev = peer->vdev;
  1466. struct dp_soc *soc = vdev->pdev->soc;
  1467. uint32_t hw_qdesc_size;
  1468. uint32_t hw_qdesc_align;
  1469. int hal_pn_type;
  1470. void *hw_qdesc_vaddr;
  1471. uint32_t alloc_tries = 0;
  1472. int err = QDF_STATUS_SUCCESS;
  1473. if (peer->delete_in_progress ||
  1474. !qdf_atomic_read(&peer->is_default_route_set))
  1475. return QDF_STATUS_E_FAILURE;
  1476. rx_tid->ba_win_size = ba_window_size;
  1477. if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
  1478. return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
  1479. start_seq);
  1480. rx_tid->delba_tx_status = 0;
  1481. rx_tid->ppdu_id_2k = 0;
  1482. rx_tid->num_of_addba_req = 0;
  1483. rx_tid->num_of_delba_req = 0;
  1484. rx_tid->num_of_addba_resp = 0;
  1485. rx_tid->num_addba_rsp_failed = 0;
  1486. rx_tid->num_addba_rsp_success = 0;
  1487. rx_tid->delba_tx_success_cnt = 0;
  1488. rx_tid->delba_tx_fail_cnt = 0;
  1489. rx_tid->statuscode = 0;
  1490. /* TODO: Allocating HW queue descriptors based on max BA window size
  1491. * for all QOS TIDs so that same descriptor can be used later when
1492. * ADDBA request is received. This should be changed to allocate HW
  1493. * queue descriptors based on BA window size being negotiated (0 for
  1494. * non BA cases), and reallocate when BA window size changes and also
  1495. * send WMI message to FW to change the REO queue descriptor in Rx
  1496. * peer entry as part of dp_rx_tid_update.
  1497. */
  1498. if (tid != DP_NON_QOS_TID)
  1499. hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
  1500. HAL_RX_MAX_BA_WINDOW, tid);
  1501. else
  1502. hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
  1503. ba_window_size, tid);
  1504. hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
  1505. /* To avoid unnecessary extra allocation for alignment, try allocating
1506. * the exact size and see if the address is already aligned.
  1507. */
  1508. rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
  1509. try_desc_alloc:
  1510. rx_tid->hw_qdesc_vaddr_unaligned =
  1511. qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
  1512. if (!rx_tid->hw_qdesc_vaddr_unaligned) {
  1513. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1514. "%s: Rx tid HW desc alloc failed: tid %d",
  1515. __func__, tid);
  1516. return QDF_STATUS_E_NOMEM;
  1517. }
  1518. if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
  1519. hw_qdesc_align) {
1520. /* Address allocated above is not aligned. Allocate extra
  1521. * memory for alignment
  1522. */
  1523. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1524. rx_tid->hw_qdesc_vaddr_unaligned =
  1525. qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
  1526. hw_qdesc_align - 1);
  1527. if (!rx_tid->hw_qdesc_vaddr_unaligned) {
  1528. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1529. "%s: Rx tid HW desc alloc failed: tid %d",
  1530. __func__, tid);
  1531. return QDF_STATUS_E_NOMEM;
  1532. }
  1533. hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
  1534. rx_tid->hw_qdesc_vaddr_unaligned,
  1535. hw_qdesc_align);
  1536. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1537. "%s: Total Size %d Aligned Addr %pK",
  1538. __func__, rx_tid->hw_qdesc_alloc_size,
  1539. hw_qdesc_vaddr);
  1540. } else {
  1541. hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
  1542. }
  1543. /* TODO: Ensure that sec_type is set before ADDBA is received.
  1544. * Currently this is set based on htt indication
  1545. * HTT_T2H_MSG_TYPE_SEC_IND from target
  1546. */
  1547. switch (peer->security[dp_sec_ucast].sec_type) {
  1548. case cdp_sec_type_tkip_nomic:
  1549. case cdp_sec_type_aes_ccmp:
  1550. case cdp_sec_type_aes_ccmp_256:
  1551. case cdp_sec_type_aes_gcmp:
  1552. case cdp_sec_type_aes_gcmp_256:
  1553. hal_pn_type = HAL_PN_WPA;
  1554. break;
  1555. case cdp_sec_type_wapi:
  1556. if (vdev->opmode == wlan_op_mode_ap)
  1557. hal_pn_type = HAL_PN_WAPI_EVEN;
  1558. else
  1559. hal_pn_type = HAL_PN_WAPI_UNEVEN;
  1560. break;
  1561. default:
  1562. hal_pn_type = HAL_PN_NONE;
  1563. break;
  1564. }
  1565. hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
  1566. hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
  1567. qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
  1568. QDF_DMA_BIDIRECTIONAL, rx_tid->hw_qdesc_alloc_size,
  1569. &(rx_tid->hw_qdesc_paddr));
  1570. if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
  1571. QDF_STATUS_SUCCESS) {
  1572. if (alloc_tries++ < 10) {
  1573. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1574. rx_tid->hw_qdesc_vaddr_unaligned = NULL;
  1575. goto try_desc_alloc;
  1576. } else {
  1577. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1578. "%s: Rx tid HW desc alloc failed (lowmem): tid %d",
  1579. __func__, tid);
  1580. err = QDF_STATUS_E_NOMEM;
  1581. goto error;
  1582. }
  1583. }
  1584. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
  1585. if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
  1586. vdev->pdev->ctrl_pdev, peer->vdev->vdev_id,
  1587. peer->mac_addr.raw, rx_tid->hw_qdesc_paddr, tid, tid,
  1588. 1, ba_window_size)) {
  1589. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1590. "%s: Failed to send reo queue setup to FW - tid %d\n",
  1591. __func__, tid);
  1592. err = QDF_STATUS_E_FAILURE;
  1593. goto error;
  1594. }
  1595. }
  1596. return 0;
  1597. error:
  1598. if (NULL != rx_tid->hw_qdesc_vaddr_unaligned) {
  1599. if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
  1600. QDF_STATUS_SUCCESS)
  1601. qdf_mem_unmap_nbytes_single(
  1602. soc->osdev,
  1603. rx_tid->hw_qdesc_paddr,
  1604. QDF_DMA_BIDIRECTIONAL,
  1605. rx_tid->hw_qdesc_alloc_size);
  1606. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1607. rx_tid->hw_qdesc_vaddr_unaligned = NULL;
  1608. }
  1609. return err;
  1610. }
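/*
 * dp_rx_tid_setup_wifi3() above uses a common over-allocate-and-align
 * pattern: try the exact size first and, only if the returned address is
 * misaligned, re-allocate with (align - 1) extra bytes and round up. A
 * standalone sketch of the rounding step, assuming hw_qdesc_align is a
 * power of two (qdf_align() performs the same rounding):
 *
 *	unsigned long addr = (unsigned long)rx_tid->hw_qdesc_vaddr_unaligned;
 *	unsigned long aligned = (addr + hw_qdesc_align - 1) &
 *				~((unsigned long)hw_qdesc_align - 1);
 *	void *hw_qdesc_vaddr = (void *)aligned;
 */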
  1611. /*
  1612. * dp_rx_tid_delete_cb() - Callback to flush reo descriptor HW cache
1613. * after deleting the entries (i.e., setting valid=0)
  1614. *
  1615. * @soc: DP SOC handle
  1616. * @cb_ctxt: Callback context
  1617. * @reo_status: REO command status
  1618. */
  1619. static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
  1620. union hal_reo_status *reo_status)
  1621. {
  1622. struct reo_desc_list_node *freedesc =
  1623. (struct reo_desc_list_node *)cb_ctxt;
  1624. uint32_t list_size;
  1625. struct reo_desc_list_node *desc;
  1626. unsigned long curr_ts = qdf_get_system_timestamp();
  1627. uint32_t desc_size, tot_desc_size;
  1628. struct hal_reo_cmd_params params;
  1629. if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
  1630. qdf_mem_zero(reo_status, sizeof(*reo_status));
  1631. reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
  1632. dp_reo_desc_free(soc, (void *)freedesc, reo_status);
  1633. return;
  1634. } else if (reo_status->rx_queue_status.header.status !=
  1635. HAL_REO_CMD_SUCCESS) {
  1636. /* Should not happen normally. Just print error for now */
  1637. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1638. "%s: Rx tid HW desc deletion failed(%d): tid %d",
  1639. __func__,
  1640. reo_status->rx_queue_status.header.status,
  1641. freedesc->rx_tid.tid);
  1642. }
  1643. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
  1644. "%s: rx_tid: %d status: %d", __func__,
  1645. freedesc->rx_tid.tid,
  1646. reo_status->rx_queue_status.header.status);
  1647. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  1648. freedesc->free_ts = curr_ts;
  1649. qdf_list_insert_back_size(&soc->reo_desc_freelist,
  1650. (qdf_list_node_t *)freedesc, &list_size);
  1651. while ((qdf_list_peek_front(&soc->reo_desc_freelist,
  1652. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
  1653. ((list_size >= REO_DESC_FREELIST_SIZE) ||
  1654. ((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS))) {
  1655. struct dp_rx_tid *rx_tid;
  1656. qdf_list_remove_front(&soc->reo_desc_freelist,
  1657. (qdf_list_node_t **)&desc);
  1658. list_size--;
  1659. rx_tid = &desc->rx_tid;
  1660. /* Flush and invalidate REO descriptor from HW cache: Base and
  1661. * extension descriptors should be flushed separately */
  1662. tot_desc_size = rx_tid->hw_qdesc_alloc_size;
  1663. /* Get base descriptor size by passing non-qos TID */
  1664. desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
  1665. DP_NON_QOS_TID);
  1666. /* Flush reo extension descriptors */
  1667. while ((tot_desc_size -= desc_size) > 0) {
  1668. qdf_mem_zero(&params, sizeof(params));
  1669. params.std.addr_lo =
  1670. ((uint64_t)(rx_tid->hw_qdesc_paddr) +
  1671. tot_desc_size) & 0xffffffff;
  1672. params.std.addr_hi =
  1673. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  1674. if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
  1675. CMD_FLUSH_CACHE,
  1676. &params,
  1677. NULL,
  1678. NULL)) {
  1679. QDF_TRACE(QDF_MODULE_ID_DP,
  1680. QDF_TRACE_LEVEL_ERROR,
  1681. "%s: fail to send CMD_CACHE_FLUSH:"
  1682. "tid %d desc %pK", __func__,
  1683. rx_tid->tid,
  1684. (void *)(rx_tid->hw_qdesc_paddr));
  1685. }
  1686. }
  1687. /* Flush base descriptor */
  1688. qdf_mem_zero(&params, sizeof(params));
  1689. params.std.need_status = 1;
  1690. params.std.addr_lo =
  1691. (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
  1692. params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  1693. if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
  1694. CMD_FLUSH_CACHE,
  1695. &params,
  1696. dp_reo_desc_free,
  1697. (void *)desc)) {
  1698. union hal_reo_status reo_status;
  1699. /*
1700. * If dp_reo_send_cmd returns failure, the related TID queue
1701. * desc should be unmapped, and the local reo_desc, together
1702. * with the TID queue desc, freed accordingly.
1703. *
1704. * Invoke the desc_free function directly here to do the cleanup.
  1705. */
  1706. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1707. "%s: fail to send REO cmd to flush cache: tid %d",
  1708. __func__, rx_tid->tid);
  1709. qdf_mem_zero(&reo_status, sizeof(reo_status));
  1710. reo_status.fl_cache_status.header.status = 0;
  1711. dp_reo_desc_free(soc, (void *)desc, &reo_status);
  1712. }
  1713. }
  1714. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  1715. }
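/*
 * dp_rx_tid_delete_cb() above defers the actual HW cache flush:
 * descriptors are parked on reo_desc_freelist and flushed only once the
 * list holds REO_DESC_FREELIST_SIZE entries or the entry at the head has
 * aged past REO_DESC_FREE_DEFER_MS. A condensed sketch of that trigger,
 * using the same names as above:
 *
 *	bool flush = (list_size >= REO_DESC_FREELIST_SIZE) ||
 *		     ((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS);
 */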
  1716. /*
1717. * dp_rx_tid_delete_wifi3() - Delete receive TID queue
  1718. * @peer: Datapath peer handle
  1719. * @tid: TID
  1720. *
  1721. * Return: 0 on success, error code on failure
  1722. */
  1723. static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
  1724. {
  1725. struct dp_rx_tid *rx_tid = &(peer->rx_tid[tid]);
  1726. struct dp_soc *soc = peer->vdev->pdev->soc;
  1727. struct hal_reo_cmd_params params;
  1728. struct reo_desc_list_node *freedesc =
  1729. qdf_mem_malloc(sizeof(*freedesc));
  1730. if (!freedesc) {
  1731. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1732. "%s: malloc failed for freedesc: tid %d",
  1733. __func__, tid);
  1734. return -ENOMEM;
  1735. }
  1736. freedesc->rx_tid = *rx_tid;
  1737. qdf_mem_zero(&params, sizeof(params));
  1738. params.std.need_status = 1;
  1739. params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
  1740. params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  1741. params.u.upd_queue_params.update_vld = 1;
  1742. params.u.upd_queue_params.vld = 0;
  1743. dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
  1744. dp_rx_tid_delete_cb, (void *)freedesc);
  1745. rx_tid->hw_qdesc_vaddr_unaligned = NULL;
  1746. rx_tid->hw_qdesc_alloc_size = 0;
  1747. rx_tid->hw_qdesc_paddr = 0;
  1748. return 0;
  1749. }
  1750. #ifdef DP_LFR
  1751. static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
  1752. {
  1753. int tid;
  1754. for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
  1755. dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
  1756. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1757. "Setting up TID %d for peer %pK peer->local_id %d",
  1758. tid, peer, peer->local_id);
  1759. }
  1760. }
  1761. #else
1762. static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
  1763. #endif
  1764. /*
1765. * dp_peer_rx_init() - Initialize receive TID state
  1766. * @pdev: Datapath pdev
  1767. * @peer: Datapath peer
  1768. *
  1769. */
  1770. void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
  1771. {
  1772. int tid;
  1773. struct dp_rx_tid *rx_tid;
  1774. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  1775. rx_tid = &peer->rx_tid[tid];
  1776. rx_tid->array = &rx_tid->base;
  1777. rx_tid->base.head = rx_tid->base.tail = NULL;
  1778. rx_tid->tid = tid;
  1779. rx_tid->defrag_timeout_ms = 0;
  1780. rx_tid->ba_win_size = 0;
  1781. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  1782. rx_tid->defrag_waitlist_elem.tqe_next = NULL;
  1783. rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
  1784. #ifdef notyet /* TODO: See if this is required for exception handling */
  1785. /* invalid sequence number */
  1786. peer->tids_last_seq[tid] = 0xffff;
  1787. #endif
  1788. }
  1789. peer->active_ba_session_cnt = 0;
  1790. peer->hw_buffer_size = 0;
  1791. peer->kill_256_sessions = 0;
  1792. /* Setup default (non-qos) rx tid queue */
  1793. dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
  1794. /* Setup rx tid queue for TID 0.
1795. * Other queues will be set up on receiving the first packet,
1796. * which would otherwise cause a NULL REO queue error
  1797. */
  1798. dp_rx_tid_setup_wifi3(peer, 0, 1, 0);
  1799. /*
  1800. * Setup the rest of TID's to handle LFR
  1801. */
  1802. dp_peer_setup_remaining_tids(peer);
  1803. /*
  1804. * Set security defaults: no PN check, no security. The target may
  1805. * send a HTT SEC_IND message to overwrite these defaults.
  1806. */
  1807. peer->security[dp_sec_ucast].sec_type =
  1808. peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
  1809. }
  1810. /*
1811. * dp_peer_rx_cleanup() - Cleanup receive TID state
  1812. * @vdev: Datapath vdev
  1813. * @peer: Datapath peer
  1814. *
  1815. */
  1816. void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
  1817. {
  1818. int tid;
  1819. uint32_t tid_delete_mask = 0;
  1820. DP_TRACE(INFO_HIGH, FL("Remove tids for peer: %pK"), peer);
  1821. for (tid = 0; tid < DP_MAX_TIDS; tid++) {
  1822. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  1823. qdf_spin_lock_bh(&rx_tid->tid_lock);
  1824. if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned != NULL) {
  1825. dp_rx_tid_delete_wifi3(peer, tid);
  1826. /* Cleanup defrag related resource */
  1827. dp_rx_defrag_waitlist_remove(peer, tid);
  1828. dp_rx_reorder_flush_frag(peer, tid);
  1829. tid_delete_mask |= (1 << tid);
  1830. }
  1831. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1832. }
  1833. #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
  1834. if (soc->ol_ops->peer_rx_reorder_queue_remove) {
  1835. soc->ol_ops->peer_rx_reorder_queue_remove(vdev->pdev->ctrl_pdev,
  1836. peer->vdev->vdev_id, peer->mac_addr.raw,
  1837. tid_delete_mask);
  1838. }
  1839. #endif
  1840. for (tid = 0; tid < DP_MAX_TIDS; tid++)
  1841. qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
  1842. }
  1843. /*
1844. * dp_peer_cleanup() - Cleanup peer information
  1845. * @vdev: Datapath vdev
  1846. * @peer: Datapath peer
  1847. *
  1848. */
  1849. void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
  1850. {
  1851. peer->last_assoc_rcvd = 0;
  1852. peer->last_disassoc_rcvd = 0;
  1853. peer->last_deauth_rcvd = 0;
  1854. /* cleanup the Rx reorder queues for this peer */
  1855. dp_peer_rx_cleanup(vdev, peer);
  1856. }
1857. /* dp_teardown_256_ba_sessions() - Teardown sessions using a 256
1858. * window size when a request with
1859. * a 64 window size is received.
1860. * This is done as a WAR since HW can
1861. * have only one setting per peer (64 or 256).
1862. * For HKv2, we use the per-tid buffersize setting
1863. * for tids 0 to per_tid_basize_max_tid. For tids
1864. * greater than per_tid_basize_max_tid we use the
1865. * HKv1 method.
  1866. * @peer: Datapath peer
  1867. *
  1868. * Return: void
  1869. */
  1870. static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
  1871. {
  1872. uint8_t delba_rcode = 0;
  1873. int tid;
  1874. struct dp_rx_tid *rx_tid = NULL;
  1875. tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
  1876. for (; tid < DP_MAX_TIDS; tid++) {
  1877. rx_tid = &peer->rx_tid[tid];
  1878. qdf_spin_lock_bh(&rx_tid->tid_lock);
  1879. if (rx_tid->ba_win_size <= 64) {
  1880. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1881. continue;
  1882. } else {
  1883. if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
  1884. rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
  1885. /* send delba */
  1886. if (!rx_tid->delba_tx_status) {
  1887. rx_tid->delba_tx_retry++;
  1888. rx_tid->delba_tx_status = 1;
  1889. rx_tid->delba_rcode =
  1890. IEEE80211_REASON_QOS_SETUP_REQUIRED;
  1891. delba_rcode = rx_tid->delba_rcode;
  1892. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1893. peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
  1894. peer->vdev->pdev->ctrl_pdev,
  1895. peer->ctrl_peer,
  1896. peer->mac_addr.raw,
  1897. tid, peer->vdev->ctrl_vdev,
  1898. delba_rcode);
  1899. } else {
  1900. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1901. }
  1902. } else {
  1903. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1904. }
  1905. }
  1906. }
  1907. }
  1908. /*
1909. * dp_addba_resp_tx_completion_wifi3() - Update Rx Tid State
  1910. *
  1911. * @peer: Datapath peer handle
  1912. * @tid: TID number
  1913. * @status: tx completion status
  1914. * Return: 0 on success, error code on failure
  1915. */
  1916. int dp_addba_resp_tx_completion_wifi3(void *peer_handle,
  1917. uint8_t tid, int status)
  1918. {
  1919. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1920. struct dp_rx_tid *rx_tid = NULL;
  1921. if (!peer || peer->delete_in_progress) {
  1922. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1923. "%s: Peer is NULL!\n", __func__);
  1924. return QDF_STATUS_E_FAILURE;
  1925. }
  1926. rx_tid = &peer->rx_tid[tid];
  1927. qdf_spin_lock_bh(&rx_tid->tid_lock);
  1928. if (status) {
  1929. rx_tid->num_addba_rsp_failed++;
  1930. dp_rx_tid_update_wifi3(peer, tid, 1, 0);
  1931. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  1932. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1933. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1934. "%s: Rx Tid- %d addba rsp tx completion failed!",
  1935. __func__, tid);
  1936. return QDF_STATUS_SUCCESS;
  1937. }
  1938. rx_tid->num_addba_rsp_success++;
  1939. if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
  1940. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1941. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1942. "%s: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
  1943. __func__, tid);
  1944. return QDF_STATUS_E_FAILURE;
  1945. }
  1946. if (!qdf_atomic_read(&peer->is_default_route_set)) {
  1947. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  1948. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1949. "%s: default route is not set for peer: %pM",
  1950. __func__, peer->mac_addr.raw);
  1951. return QDF_STATUS_E_FAILURE;
  1952. }
  1953. /* First Session */
  1954. if (peer->active_ba_session_cnt == 0) {
  1955. if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
  1956. peer->hw_buffer_size = 256;
  1957. else
  1958. peer->hw_buffer_size = 64;
  1959. }
  1960. rx_tid->ba_status = DP_RX_BA_ACTIVE;
  1961. peer->active_ba_session_cnt++;
  1962. qdf_spin_unlock_bh(&rx_tid->tid_lock);
1963. /* Kill any session having a 256 buffer size
1964. * when a 64 buffer size request is received.
1965. * Also, latch on to 64 as the new buffer size.
1966. */
  1967. if (peer->kill_256_sessions) {
  1968. dp_teardown_256_ba_sessions(peer);
  1969. peer->kill_256_sessions = 0;
  1970. }
  1971. return QDF_STATUS_SUCCESS;
  1972. }
  1973. /*
1974. * dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
  1975. *
  1976. * @peer: Datapath peer handle
  1977. * @tid: TID number
  1978. * @dialogtoken: output dialogtoken
1979. * @statuscode: output status code
  1980. * @buffersize: Output BA window size
  1981. * @batimeout: Output BA timeout
  1982. */
  1983. void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
  1984. uint8_t *dialogtoken, uint16_t *statuscode,
  1985. uint16_t *buffersize, uint16_t *batimeout)
  1986. {
  1987. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1988. struct dp_rx_tid *rx_tid = NULL;
  1989. if (!peer || peer->delete_in_progress) {
  1990. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  1991. "%s: Peer is NULL!\n", __func__);
  1992. return;
  1993. }
  1994. rx_tid = &peer->rx_tid[tid];
  1995. qdf_spin_lock_bh(&rx_tid->tid_lock);
  1996. rx_tid->num_of_addba_resp++;
  1997. /* setup ADDBA response parameters */
  1998. *dialogtoken = rx_tid->dialogtoken;
  1999. *statuscode = rx_tid->statuscode;
  2000. *buffersize = rx_tid->ba_win_size;
  2001. *batimeout = 0;
  2002. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2003. }
  2004. /* dp_check_ba_buffersize() - Check buffer size in request
  2005. * and latch onto this size based on
2006. * the size used in the first active session.
  2007. * @peer: Datapath peer
  2008. * @tid: Tid
  2009. * @buffersize: Block ack window size
  2010. *
  2011. * Return: void
  2012. */
  2013. static void dp_check_ba_buffersize(struct dp_peer *peer,
  2014. uint16_t tid,
  2015. uint16_t buffersize)
  2016. {
  2017. struct dp_rx_tid *rx_tid = NULL;
  2018. rx_tid = &peer->rx_tid[tid];
  2019. if (peer->vdev->pdev->soc->per_tid_basize_max_tid &&
  2020. tid < peer->vdev->pdev->soc->per_tid_basize_max_tid) {
  2021. rx_tid->ba_win_size = buffersize;
  2022. return;
  2023. } else {
  2024. if (peer->active_ba_session_cnt == 0) {
  2025. rx_tid->ba_win_size = buffersize;
  2026. } else {
  2027. if (peer->hw_buffer_size == 64) {
  2028. if (buffersize <= 64)
  2029. rx_tid->ba_win_size = buffersize;
  2030. else
  2031. rx_tid->ba_win_size = peer->hw_buffer_size;
  2032. } else if (peer->hw_buffer_size == 256) {
  2033. if (buffersize > 64) {
  2034. rx_tid->ba_win_size = buffersize;
  2035. } else {
  2036. rx_tid->ba_win_size = buffersize;
  2037. peer->hw_buffer_size = 64;
  2038. peer->kill_256_sessions = 1;
  2039. }
  2040. }
  2041. }
  2042. }
  2043. }
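/*
 * A condensed decision table for dp_check_ba_buffersize() above, derived
 * from the code, for tids at or beyond per_tid_basize_max_tid:
 *
 *	no active session       -> ba_win_size = buffersize (hw_buffer_size
 *	                           is latched later, in addba resp completion)
 *	hw size 64,  req <= 64  -> ba_win_size = buffersize
 *	hw size 64,  req  > 64  -> ba_win_size = 64 (clamped)
 *	hw size 256, req  > 64  -> ba_win_size = buffersize
 *	hw size 256, req <= 64  -> ba_win_size = buffersize, drop
 *	                           hw_buffer_size to 64 and set
 *	                           kill_256_sessions
 */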
  2044. /*
  2045. * dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
  2046. *
  2047. * @peer: Datapath peer handle
  2048. * @dialogtoken: dialogtoken from ADDBA frame
  2049. * @tid: TID number
  2050. * @batimeout: BA timeout
  2051. * @buffersize: BA window size
  2052. * @startseqnum: Start seq. number received in BA sequence control
  2053. *
  2054. * Return: 0 on success, error code on failure
  2055. */
  2056. int dp_addba_requestprocess_wifi3(void *peer_handle,
  2057. uint8_t dialogtoken,
  2058. uint16_t tid, uint16_t batimeout,
  2059. uint16_t buffersize,
  2060. uint16_t startseqnum)
  2061. {
  2062. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2063. struct dp_rx_tid *rx_tid = NULL;
  2064. if (!peer || peer->delete_in_progress) {
  2065. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  2066. "%s: Peer is NULL!\n", __func__);
  2067. return QDF_STATUS_E_FAILURE;
  2068. }
  2069. rx_tid = &peer->rx_tid[tid];
  2070. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2071. rx_tid->num_of_addba_req++;
  2072. if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
  2073. rx_tid->hw_qdesc_vaddr_unaligned != NULL) ||
  2074. (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS)) {
  2075. dp_rx_tid_update_wifi3(peer, tid, 1, 0);
  2076. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  2077. peer->active_ba_session_cnt--;
  2078. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2079. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2080. "%s: Rx Tid- %d hw qdesc is already setup",
  2081. __func__, tid);
  2082. return QDF_STATUS_E_FAILURE;
  2083. }
  2084. if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
  2085. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2086. return QDF_STATUS_E_FAILURE;
  2087. }
  2088. dp_check_ba_buffersize(peer, tid, buffersize);
  2089. if (dp_rx_tid_setup_wifi3(peer, tid,
  2090. rx_tid->ba_win_size, startseqnum)) {
  2091. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  2092. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2093. return QDF_STATUS_E_FAILURE;
  2094. }
  2095. rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
  2096. rx_tid->dialogtoken = dialogtoken;
  2097. rx_tid->startseqnum = startseqnum;
  2098. if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
  2099. rx_tid->statuscode = rx_tid->userstatuscode;
  2100. else
  2101. rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
  2102. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2103. return QDF_STATUS_SUCCESS;
  2104. }
  2105. /*
2106. * dp_set_addba_response() - Set a user defined ADDBA response status code
  2107. *
  2108. * @peer: Datapath peer handle
  2109. * @tid: TID number
  2110. * @statuscode: response status code to be set
  2111. */
  2112. void dp_set_addba_response(void *peer_handle, uint8_t tid,
  2113. uint16_t statuscode)
  2114. {
  2115. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2116. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  2117. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2118. rx_tid->userstatuscode = statuscode;
  2119. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2120. }
  2121. /*
2122. * dp_delba_process_wifi3() - Process DELBA from peer
  2123. * @peer: Datapath peer handle
  2124. * @tid: TID number
  2125. * @reasoncode: Reason code received in DELBA frame
  2126. *
  2127. * Return: 0 on success, error code on failure
  2128. */
  2129. int dp_delba_process_wifi3(void *peer_handle,
  2130. int tid, uint16_t reasoncode)
  2131. {
  2132. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2133. struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
  2134. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2135. if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
  2136. rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
  2137. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2138. return QDF_STATUS_E_FAILURE;
  2139. }
  2140. /* TODO: See if we can delete the existing REO queue descriptor and
2141. * replace it with a new one without the queue extension descriptor
2142. * to save memory
  2143. */
  2144. rx_tid->delba_rcode = reasoncode;
  2145. rx_tid->num_of_delba_req++;
  2146. dp_rx_tid_update_wifi3(peer, tid, 1, 0);
  2147. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  2148. peer->active_ba_session_cnt--;
  2149. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2150. return 0;
  2151. }
  2152. /*
2153. * dp_delba_tx_completion_wifi3() - Handle DELBA tx completion
  2154. *
  2155. * @peer: Datapath peer handle
  2156. * @tid: TID number
  2157. * @status: tx completion status
  2158. * Return: 0 on success, error code on failure
  2159. */
  2160. int dp_delba_tx_completion_wifi3(void *peer_handle,
  2161. uint8_t tid, int status)
  2162. {
  2163. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2164. struct dp_rx_tid *rx_tid = NULL;
  2165. if (!peer || peer->delete_in_progress) {
  2166. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  2167. "%s: Peer is NULL!", __func__);
  2168. return QDF_STATUS_E_FAILURE;
  2169. }
  2170. rx_tid = &peer->rx_tid[tid];
  2171. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2172. if (status) {
  2173. rx_tid->delba_tx_fail_cnt++;
  2174. if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
  2175. rx_tid->delba_tx_retry = 0;
  2176. rx_tid->delba_tx_status = 0;
  2177. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2178. } else {
  2179. rx_tid->delba_tx_retry++;
  2180. rx_tid->delba_tx_status = 1;
  2181. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2182. peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
  2183. peer->vdev->pdev->ctrl_pdev, peer->ctrl_peer,
  2184. peer->mac_addr.raw, tid, peer->vdev->ctrl_vdev,
  2185. rx_tid->delba_rcode);
  2186. }
  2187. return QDF_STATUS_SUCCESS;
  2188. } else {
  2189. rx_tid->delba_tx_success_cnt++;
  2190. rx_tid->delba_tx_retry = 0;
  2191. rx_tid->delba_tx_status = 0;
  2192. }
  2193. if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
  2194. dp_rx_tid_update_wifi3(peer, tid, 1, 0);
  2195. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  2196. peer->active_ba_session_cnt--;
  2197. }
  2198. if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
  2199. dp_rx_tid_update_wifi3(peer, tid, 1, 0);
  2200. rx_tid->ba_status = DP_RX_BA_INACTIVE;
  2201. }
  2202. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2203. return QDF_STATUS_SUCCESS;
  2204. }
  2205. void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
  2206. qdf_nbuf_t msdu_list)
  2207. {
  2208. while (msdu_list) {
  2209. qdf_nbuf_t msdu = msdu_list;
  2210. msdu_list = qdf_nbuf_next(msdu_list);
  2211. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  2212. "discard rx %pK from partly-deleted peer %pK "
  2213. "(%02x:%02x:%02x:%02x:%02x:%02x)",
  2214. msdu, peer,
  2215. peer->mac_addr.raw[0], peer->mac_addr.raw[1],
  2216. peer->mac_addr.raw[2], peer->mac_addr.raw[3],
  2217. peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
  2218. qdf_nbuf_free(msdu);
  2219. }
  2220. }
  2221. /**
  2222. * dp_set_pn_check_wifi3() - enable PN check in REO for security
2223. * @vdev_handle - Datapath vdev handle
2224. * @peer_handle - Datapath peer handle
2225. * @sec_type - security type
2226. * @rx_pn - receive PN starting number
2227. *
2228. * Return: none
  2229. */
  2230. void
  2231. dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
  2232. {
  2233. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  2234. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2235. struct dp_pdev *pdev;
  2236. struct dp_soc *soc;
  2237. int i;
  2238. uint8_t pn_size;
  2239. struct hal_reo_cmd_params params;
  2240. /* preconditions */
  2241. qdf_assert(vdev);
  2242. pdev = vdev->pdev;
  2243. soc = pdev->soc;
  2244. qdf_mem_zero(&params, sizeof(params));
  2245. params.std.need_status = 1;
  2246. params.u.upd_queue_params.update_pn_valid = 1;
  2247. params.u.upd_queue_params.update_pn_size = 1;
  2248. params.u.upd_queue_params.update_pn = 1;
  2249. params.u.upd_queue_params.update_pn_check_needed = 1;
  2250. params.u.upd_queue_params.update_svld = 1;
  2251. params.u.upd_queue_params.svld = 0;
  2252. peer->security[dp_sec_ucast].sec_type = sec_type;
  2253. switch (sec_type) {
  2254. case cdp_sec_type_tkip_nomic:
  2255. case cdp_sec_type_aes_ccmp:
  2256. case cdp_sec_type_aes_ccmp_256:
  2257. case cdp_sec_type_aes_gcmp:
  2258. case cdp_sec_type_aes_gcmp_256:
  2259. params.u.upd_queue_params.pn_check_needed = 1;
  2260. params.u.upd_queue_params.pn_size = 48;
  2261. pn_size = 48;
  2262. break;
  2263. case cdp_sec_type_wapi:
  2264. params.u.upd_queue_params.pn_check_needed = 1;
  2265. params.u.upd_queue_params.pn_size = 128;
  2266. pn_size = 128;
  2267. if (vdev->opmode == wlan_op_mode_ap) {
  2268. params.u.upd_queue_params.pn_even = 1;
  2269. params.u.upd_queue_params.update_pn_even = 1;
  2270. } else {
  2271. params.u.upd_queue_params.pn_uneven = 1;
  2272. params.u.upd_queue_params.update_pn_uneven = 1;
  2273. }
  2274. break;
  2275. default:
  2276. params.u.upd_queue_params.pn_check_needed = 0;
  2277. pn_size = 0;
  2278. break;
  2279. }
  2280. for (i = 0; i < DP_MAX_TIDS; i++) {
  2281. struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
  2282. qdf_spin_lock_bh(&rx_tid->tid_lock);
  2283. if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
  2284. params.std.addr_lo =
  2285. rx_tid->hw_qdesc_paddr & 0xffffffff;
  2286. params.std.addr_hi =
  2287. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2288. if (pn_size) {
  2289. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2290. QDF_TRACE_LEVEL_INFO_HIGH,
  2291. "%s PN set for TID:%d pn:%x:%x:%x:%x",
  2292. __func__, i, rx_pn[3], rx_pn[2],
  2293. rx_pn[1], rx_pn[0]);
  2294. params.u.upd_queue_params.update_pn_valid = 1;
  2295. params.u.upd_queue_params.pn_31_0 = rx_pn[0];
  2296. params.u.upd_queue_params.pn_63_32 = rx_pn[1];
  2297. params.u.upd_queue_params.pn_95_64 = rx_pn[2];
  2298. params.u.upd_queue_params.pn_127_96 = rx_pn[3];
  2299. }
  2300. rx_tid->pn_size = pn_size;
  2301. dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
  2302. dp_rx_tid_update_cb, rx_tid);
  2303. } else {
  2304. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  2305. "PN Check not setup for TID :%d ", i);
  2306. }
  2307. qdf_spin_unlock_bh(&rx_tid->tid_lock);
  2308. }
  2309. }
  2310. void
  2311. dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
  2312. enum cdp_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
  2313. u_int32_t *rx_pn)
  2314. {
  2315. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  2316. struct dp_peer *peer;
  2317. int sec_index;
  2318. peer = dp_peer_find_by_id(soc, peer_id);
  2319. if (!peer) {
  2320. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2321. "Couldn't find peer from ID %d - skipping security inits",
  2322. peer_id);
  2323. return;
  2324. }
  2325. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  2326. "sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
  2327. "%s key of type %d",
  2328. peer,
  2329. peer->mac_addr.raw[0], peer->mac_addr.raw[1],
  2330. peer->mac_addr.raw[2], peer->mac_addr.raw[3],
  2331. peer->mac_addr.raw[4], peer->mac_addr.raw[5],
  2332. is_unicast ? "ucast" : "mcast",
  2333. sec_type);
  2334. sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
  2335. peer->security[sec_index].sec_type = sec_type;
  2336. #ifdef notyet /* TODO: See if this is required for defrag support */
  2337. /* michael key only valid for TKIP, but for simplicity,
  2338. * copy it anyway
  2339. */
  2340. qdf_mem_copy(
  2341. &peer->security[sec_index].michael_key[0],
  2342. michael_key,
  2343. sizeof(peer->security[sec_index].michael_key));
  2344. #ifdef BIG_ENDIAN_HOST
  2345. OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
  2346. sizeof(peer->security[sec_index].michael_key));
  2347. #endif /* BIG_ENDIAN_HOST */
  2348. #endif
  2349. #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
  2350. if (sec_type != cdp_sec_type_wapi) {
  2351. qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
  2352. } else {
  2353. for (i = 0; i < DP_MAX_TIDS; i++) {
  2354. /*
  2355. * Setting PN valid bit for WAPI sec_type,
  2356. * since WAPI PN has to be started with predefined value
  2357. */
  2358. peer->tids_last_pn_valid[i] = 1;
  2359. qdf_mem_copy(
  2360. (u_int8_t *) &peer->tids_last_pn[i],
  2361. (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
  2362. peer->tids_last_pn[i].pn128[1] =
  2363. qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
  2364. peer->tids_last_pn[i].pn128[0] =
  2365. qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
  2366. }
  2367. }
  2368. #endif
  2369. /* TODO: Update HW TID queue with PN check parameters (pn type for
  2370. * all security types and last pn for WAPI) once REO command API
  2371. * is available
  2372. */
  2373. dp_peer_unref_del_find_by_id(peer);
  2374. }
  2375. #ifndef CONFIG_WIN
  2376. /**
  2377. * dp_register_peer() - Register peer into physical device
  2378. * @pdev - data path device instance
  2379. * @sta_desc - peer description
  2380. *
  2381. * Register peer into physical device
  2382. *
  2383. * Return: QDF_STATUS_SUCCESS registration success
  2384. * QDF_STATUS_E_FAULT peer not found
  2385. */
  2386. QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
  2387. struct ol_txrx_desc_type *sta_desc)
  2388. {
  2389. struct dp_peer *peer;
  2390. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2391. peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev,
  2392. sta_desc->sta_id);
  2393. if (!peer)
  2394. return QDF_STATUS_E_FAULT;
  2395. qdf_spin_lock_bh(&peer->peer_info_lock);
  2396. peer->state = OL_TXRX_PEER_STATE_CONN;
  2397. qdf_spin_unlock_bh(&peer->peer_info_lock);
  2398. return QDF_STATUS_SUCCESS;
  2399. }
  2400. /**
  2401. * dp_clear_peer() - remove peer from physical device
  2402. * @pdev - data path device instance
2403. * @local_id - local peer id
  2404. *
  2405. * remove peer from physical device
  2406. *
  2407. * Return: QDF_STATUS_SUCCESS registration success
  2408. * QDF_STATUS_E_FAULT peer not found
  2409. */
  2410. QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
  2411. {
  2412. struct dp_peer *peer;
  2413. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2414. peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, local_id);
  2415. if (!peer)
  2416. return QDF_STATUS_E_FAULT;
  2417. qdf_spin_lock_bh(&peer->peer_info_lock);
  2418. peer->state = OL_TXRX_PEER_STATE_DISC;
  2419. qdf_spin_unlock_bh(&peer->peer_info_lock);
  2420. return QDF_STATUS_SUCCESS;
  2421. }
  2422. /**
  2423. * dp_find_peer_by_addr_and_vdev() - Find peer by peer mac address within vdev
  2424. * @pdev - data path device instance
  2425. * @vdev - virtual interface instance
  2426. * @peer_addr - peer mac address
2427. * @local_id - local peer id with target mac address
  2428. *
  2429. * Find peer by peer mac address within vdev
  2430. *
  2431. * Return: peer instance void pointer
  2432. * NULL cannot find target peer
  2433. */
  2434. void *dp_find_peer_by_addr_and_vdev(struct cdp_pdev *pdev_handle,
  2435. struct cdp_vdev *vdev_handle,
  2436. uint8_t *peer_addr, uint8_t *local_id)
  2437. {
  2438. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2439. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2440. struct dp_peer *peer;
  2441. DP_TRACE(INFO, "vdev %pK peer_addr %pK", vdev, peer_addr);
  2442. peer = dp_peer_find_hash_find(pdev->soc, peer_addr, 0, 0);
  2443. DP_TRACE(INFO, "peer %pK vdev %pK", peer, vdev);
  2444. if (!peer)
  2445. return NULL;
  2446. if (peer->vdev != vdev) {
  2447. dp_peer_unref_delete(peer);
  2448. return NULL;
  2449. }
  2450. *local_id = peer->local_id;
  2451. DP_TRACE(INFO, "peer %pK vdev %pK local id %d", peer, vdev, *local_id);
  2452. /* ref_cnt is incremented inside dp_peer_find_hash_find().
  2453. * Decrement it here.
  2454. */
  2455. dp_peer_unref_delete(peer);
  2456. return peer;
  2457. }
  2458. /**
  2459. * dp_local_peer_id() - Find local peer id within peer instance
  2460. * @peer - peer instance
  2461. *
  2462. * Find local peer id within peer instance
  2463. *
  2464. * Return: local peer id
  2465. */
  2466. uint16_t dp_local_peer_id(void *peer)
  2467. {
  2468. return ((struct dp_peer *)peer)->local_id;
  2469. }
  2470. /**
  2471. * dp_peer_find_by_local_id() - Find peer by local peer id
  2472. * @pdev - data path device instance
  2473. * @local_peer_id - local peer id want to find
  2474. *
  2475. * Find peer by local peer id within physical device
  2476. *
  2477. * Return: peer instance void pointer
  2478. * NULL cannot find target peer
  2479. */
  2480. void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id)
  2481. {
  2482. struct dp_peer *peer;
  2483. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2484. if (local_id >= OL_TXRX_NUM_LOCAL_PEER_IDS) {
  2485. QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
  2486. "Incorrect local id %u", local_id);
  2487. return NULL;
  2488. }
  2489. qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
  2490. peer = pdev->local_peer_ids.map[local_id];
  2491. qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
  2492. DP_TRACE(DEBUG, "peer %pK local id %d", peer, local_id);
  2493. return peer;
  2494. }
  2495. /**
  2496. * dp_peer_state_update() - update peer local state
  2497. * @pdev - data path device instance
  2498. * @peer_addr - peer mac address
  2499. * @state - new peer local state
  2500. *
  2501. * update peer local state
  2502. *
  2503. * Return: QDF_STATUS_SUCCESS registration success
  2504. */
  2505. QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
  2506. enum ol_txrx_peer_state state)
  2507. {
  2508. struct dp_peer *peer;
  2509. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2510. peer = dp_peer_find_hash_find(pdev->soc, peer_mac, 0, DP_VDEV_ALL);
  2511. if (NULL == peer) {
  2512. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2513. "Failed to find peer for: [%pM]", peer_mac);
  2514. return QDF_STATUS_E_FAILURE;
  2515. }
  2516. peer->state = state;
  2517. DP_TRACE(INFO, "peer %pK state %d", peer, peer->state);
  2518. /* ref_cnt is incremented inside dp_peer_find_hash_find().
  2519. * Decrement it here.
  2520. */
  2521. dp_peer_unref_delete(peer);
  2522. return QDF_STATUS_SUCCESS;
  2523. }
  2524. /**
  2525. * dp_get_vdevid() - Get virtual interface id which peer registered
  2526. * @peer - peer instance
  2527. * @vdev_id - virtual interface id which peer registered
  2528. *
  2529. * Get virtual interface id which peer registered
  2530. *
  2531. * Return: QDF_STATUS_SUCCESS registration success
  2532. */
  2533. QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
  2534. {
  2535. struct dp_peer *peer = peer_handle;
  2536. DP_TRACE(INFO, "peer %pK vdev %pK vdev id %d",
  2537. peer, peer->vdev, peer->vdev->vdev_id);
  2538. *vdev_id = peer->vdev->vdev_id;
  2539. return QDF_STATUS_SUCCESS;
  2540. }
  2541. struct cdp_vdev *dp_get_vdev_by_sta_id(struct cdp_pdev *pdev_handle,
  2542. uint8_t sta_id)
  2543. {
  2544. struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
  2545. struct dp_peer *peer = NULL;
  2546. if (sta_id >= WLAN_MAX_STA_COUNT) {
  2547. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  2548. "Invalid sta id passed");
  2549. return NULL;
  2550. }
  2551. if (!pdev) {
  2552. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  2553. "PDEV not found for sta_id [%d]", sta_id);
  2554. return NULL;
  2555. }
  2556. peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
  2557. if (!peer) {
  2558. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
  2559. "PEER [%d] not found", sta_id);
  2560. return NULL;
  2561. }
  2562. return (struct cdp_vdev *)peer->vdev;
  2563. }
  2564. /**
2565. * dp_get_vdev_for_peer() - Get virtual interface instance to which peer belongs
2566. * @peer - peer instance
2567. *
2568. * Get virtual interface instance to which the peer belongs
  2569. *
  2570. * Return: virtual interface instance pointer
  2571. * NULL in case cannot find
  2572. */
  2573. struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
  2574. {
  2575. struct dp_peer *peer = peer_handle;
  2576. DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
  2577. return (struct cdp_vdev *)peer->vdev;
  2578. }
  2579. /**
  2580. * dp_peer_get_peer_mac_addr() - Get peer mac address
  2581. * @peer - peer instance
  2582. *
  2583. * Get peer mac address
  2584. *
  2585. * Return: peer mac address pointer
  2586. * NULL in case cannot find
  2587. */
  2588. uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
  2589. {
  2590. struct dp_peer *peer = peer_handle;
  2591. uint8_t *mac;
  2592. mac = peer->mac_addr.raw;
  2593. DP_TRACE(INFO, "peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
  2594. peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
  2595. return peer->mac_addr.raw;
  2596. }
  2597. /**
  2598. * dp_get_peer_state() - Get local peer state
  2599. * @peer - peer instance
  2600. *
  2601. * Get local peer state
  2602. *
  2603. * Return: peer status
  2604. */
  2605. int dp_get_peer_state(void *peer_handle)
  2606. {
  2607. struct dp_peer *peer = peer_handle;
  2608. DP_TRACE(DEBUG, "peer %pK stats %d", peer, peer->state);
  2609. return peer->state;
  2610. }
  2611. /**
  2612. * dp_local_peer_id_pool_init() - local peer id pool alloc for physical device
  2613. * @pdev - data path device instance
  2614. *
  2615. * local peer id pool alloc for physical device
  2616. *
  2617. * Return: none
  2618. */
  2619. void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
  2620. {
  2621. int i;
  2622. /* point the freelist to the first ID */
  2623. pdev->local_peer_ids.freelist = 0;
  2624. /* link each ID to the next one */
  2625. for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
  2626. pdev->local_peer_ids.pool[i] = i + 1;
  2627. pdev->local_peer_ids.map[i] = NULL;
  2628. }
  2629. /* link the last ID to itself, to mark the end of the list */
  2630. i = OL_TXRX_NUM_LOCAL_PEER_IDS;
  2631. pdev->local_peer_ids.pool[i] = i;
  2632. qdf_spinlock_create(&pdev->local_peer_ids.lock);
  2633. DP_TRACE(INFO, "Peer pool init");
  2634. }
  2635. /**
  2636. * dp_local_peer_id_alloc() - allocate local peer id
  2637. * @pdev - data path device instance
  2638. * @peer - new peer instance
  2639. *
  2640. * allocate local peer id
  2641. *
  2642. * Return: none
  2643. */
  2644. void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
  2645. {
  2646. int i;
  2647. qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
  2648. i = pdev->local_peer_ids.freelist;
  2649. if (pdev->local_peer_ids.pool[i] == i) {
  2650. /* the list is empty, except for the list-end marker */
  2651. peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
  2652. } else {
  2653. /* take the head ID and advance the freelist */
  2654. peer->local_id = i;
  2655. pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
  2656. pdev->local_peer_ids.map[i] = peer;
  2657. }
  2658. qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
  2659. DP_TRACE(INFO, "peer %pK, local id %d", peer, peer->local_id);
  2660. }
  2661. /**
  2662. * dp_local_peer_id_free() - remove local peer id
  2663. * @pdev - data path device instance
2664. * @peer - peer instance to be removed
  2665. *
  2666. * remove local peer id
  2667. *
  2668. * Return: none
  2669. */
  2670. void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
  2671. {
  2672. int i = peer->local_id;
  2673. if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
  2674. (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
  2675. return;
  2676. }
  2677. /* put this ID on the head of the freelist */
  2678. qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
  2679. pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
  2680. pdev->local_peer_ids.freelist = i;
  2681. pdev->local_peer_ids.map[i] = NULL;
  2682. qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
  2683. }
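/*
 * Local peer IDs use an array-encoded freelist: pool[i] holds the index
 * of the next free ID, and the last entry points to itself as the end
 * marker. A minimal, hypothetical alloc/free cycle under that invariant:
 *
 *	dp_local_peer_id_pool_init(pdev);    // freelist = 0, pool[i] = i + 1
 *	dp_local_peer_id_alloc(pdev, peer);  // peer->local_id = 0,
 *					     // freelist advances to 1
 *	dp_local_peer_id_free(pdev, peer);   // ID 0 pushed back on the head,
 *					     // freelist = 0 again
 */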
  2684. #endif
  2685. /**
  2686. * dp_get_peer_mac_addr_frm_id(): get mac address of the peer
  2687. * @soc_handle: DP SOC handle
2688. * @peer_id: peer_id of the peer
2689. * @peer_mac: buffer to hold the peer mac address
2690. * Return: vdev_id of the vap
  2691. */
  2692. uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
  2693. uint16_t peer_id, uint8_t *peer_mac)
  2694. {
  2695. struct dp_soc *soc = (struct dp_soc *)soc_handle;
  2696. struct dp_peer *peer;
  2697. uint8_t vdev_id;
  2698. peer = dp_peer_find_by_id(soc, peer_id);
  2699. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  2700. "soc %pK peer_id %d", soc, peer_id);
  2701. if (!peer) {
  2702. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2703. "peer not found ");
  2704. return CDP_INVALID_VDEV_ID;
  2705. }
2706. qdf_mem_copy(peer_mac, peer->mac_addr.raw, DP_MAC_ADDR_LEN);
  2707. vdev_id = peer->vdev->vdev_id;
  2708. dp_peer_unref_del_find_by_id(peer);
  2709. return vdev_id;
  2710. }
  2711. /**
2712. * dp_peer_rxtid_stats: Retrieve Rx TID (REO queue) stats from HW
  2713. * @peer: DP peer handle
  2714. * @dp_stats_cmd_cb: REO command callback function
  2715. * @cb_ctxt: Callback context
  2716. *
  2717. * Return: none
  2718. */
  2719. void dp_peer_rxtid_stats(struct dp_peer *peer, void (*dp_stats_cmd_cb),
  2720. void *cb_ctxt)
  2721. {
  2722. struct dp_soc *soc = peer->vdev->pdev->soc;
  2723. struct hal_reo_cmd_params params;
  2724. int i;
  2725. if (!dp_stats_cmd_cb)
  2726. return;
  2727. qdf_mem_zero(&params, sizeof(params));
  2728. for (i = 0; i < DP_MAX_TIDS; i++) {
  2729. struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
  2730. if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
  2731. params.std.need_status = 1;
  2732. params.std.addr_lo =
  2733. rx_tid->hw_qdesc_paddr & 0xffffffff;
  2734. params.std.addr_hi =
  2735. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2736. if (cb_ctxt) {
  2737. dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
  2738. &params, dp_stats_cmd_cb, cb_ctxt);
  2739. } else {
  2740. dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
  2741. &params, dp_stats_cmd_cb, rx_tid);
  2742. }
  2743. /* Flush REO descriptor from HW cache to update stats
  2744. * in descriptor memory. This is to help debugging */
  2745. qdf_mem_zero(&params, sizeof(params));
  2746. params.std.need_status = 0;
  2747. params.std.addr_lo =
  2748. rx_tid->hw_qdesc_paddr & 0xffffffff;
  2749. params.std.addr_hi =
  2750. (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
  2751. params.u.fl_cache_params.flush_no_inval = 1;
  2752. dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
  2753. NULL);
  2754. }
  2755. }
  2756. }
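/*
 * Example: a minimal, hypothetical caller of dp_peer_rxtid_stats(). The
 * callback signature matches what dp_reo_send_cmd() invokes for REO
 * commands in this file (see dp_rx_tid_update_cb above); the shared
 * command header is read via rx_queue_status, as done elsewhere here:
 *
 *	static void rxtid_stats_cb(struct dp_soc *soc, void *ctxt,
 *				   union hal_reo_status *st)
 *	{
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
 *			  "tid stats status %d",
 *			  st->rx_queue_status.header.status);
 *	}
 *
 *	dp_peer_rxtid_stats(peer, rxtid_stats_cb, NULL);
 */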
  2757. void dp_set_michael_key(struct cdp_peer *peer_handle,
  2758. bool is_unicast, uint32_t *key)
  2759. {
  2760. struct dp_peer *peer = (struct dp_peer *)peer_handle;
2761. uint8_t sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
  2762. if (!peer) {
  2763. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2764. "peer not found ");
  2765. return;
  2766. }
  2767. qdf_mem_copy(&peer->security[sec_index].michael_key[0],
  2768. key, IEEE80211_WEP_MICLEN);
  2769. }
  2770. bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
  2771. {
  2772. struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);
  2773. if (peer) {
  2774. /*
  2775. * Decrement the peer ref which is taken as part of
  2776. * dp_peer_find_by_id if PEER_LOCK_REF_PROTECT is enabled
  2777. */
  2778. dp_peer_unref_del_find_by_id(peer);
  2779. return true;
  2780. }
  2781. return false;
  2782. }