ol_txrx.c

  1. /*
  2. * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. /*=== includes ===*/
  27. /* header files for OS primitives */
  28. #include <osdep.h> /* uint32_t, etc. */
  29. #include <cdf_memory.h> /* cdf_mem_malloc,free */
  30. #include <cdf_types.h> /* cdf_device_t, cdf_print */
  31. #include <cdf_lock.h> /* cdf_spinlock */
  32. #include <cdf_atomic.h> /* cdf_atomic_read */
  33. /* Required for WLAN_FEATURE_FASTPATH */
  34. #include <ce_api.h>
  35. /* header files for utilities */
  36. #include <cds_queue.h> /* TAILQ */
  37. /* header files for configuration API */
  38. #include <ol_cfg.h> /* ol_cfg_is_high_latency */
  39. #include <ol_if_athvar.h>
  40. /* header files for HTT API */
  41. #include <ol_htt_api.h>
  42. #include <ol_htt_tx_api.h>
  43. /* header files for OS shim API */
  44. #include <ol_osif_api.h>
  45. /* header files for our own APIs */
  46. #include <ol_txrx_api.h>
  47. #include <ol_txrx_dbg.h>
  48. #include <ol_txrx_ctrl_api.h>
  49. #include <ol_txrx_osif_api.h>
  50. /* header files for our internal definitions */
  51. #include <ol_txrx_internal.h> /* TXRX_ASSERT, etc. */
  52. #include <wdi_event.h> /* WDI events */
  53. #include <ol_txrx_types.h> /* ol_txrx_pdev_t, etc. */
  54. #include <ol_ctrl_txrx_api.h>
  55. #include <ol_tx.h> /* ol_tx_ll */
  56. #include <ol_rx.h> /* ol_rx_deliver */
  57. #include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_attach, etc. */
  58. #include <ol_rx_pn.h> /* ol_rx_pn_check, etc. */
  59. #include <ol_rx_fwd.h> /* ol_rx_fwd_check, etc. */
  60. #include <ol_rx_reorder_timeout.h> /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
  61. #include <ol_rx_reorder.h>
  62. #include <ol_tx_send.h> /* ol_tx_discard_target_frms */
  63. #include <ol_tx_desc.h> /* ol_tx_desc_frame_free */
  64. #include <ol_tx_queue.h>
  65. #include <ol_txrx.h>
  66. #include "wma.h"
  67. /*=== function definitions ===*/
  68. /**
  69. * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag indicating whether mgmt
  70. * over wmi is enabled
  71. * @value: 1 to enable, 0 to disable
  72. *
  73. * Return: None
  74. */
  75. void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
  76. {
  77. struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  78. if (!pdev) {
  79. cdf_print("%s: pdev is NULL\n", __func__);
  80. return;
  81. }
  82. pdev->is_mgmt_over_wmi_enabled = value;
  83. return;
  84. }
  85. /**
  86. * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
  87. *
  88. * Return: is_mgmt_over_wmi_enabled
  89. */
  90. uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
  91. {
  92. struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  93. if (!pdev) {
  94. cdf_print("%s: pdev is NULL\n", __func__);
  95. return 0;
  96. }
  97. return pdev->is_mgmt_over_wmi_enabled;
  98. }
  99. #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
  100. ol_txrx_peer_handle
  101. ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
  102. ol_txrx_vdev_handle vdev,
  103. uint8_t *peer_addr, uint8_t *peer_id)
  104. {
  105. struct ol_txrx_peer_t *peer;
  106. peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
  107. if (!peer)
  108. return NULL;
  109. *peer_id = peer->local_id;
  110. cdf_atomic_dec(&peer->ref_cnt);
  111. return peer;
  112. }
  113. CDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id)
  114. {
  115. if (!peer) {
  116. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  117. "peer argument is null!!");
  118. return CDF_STATUS_E_FAILURE;
  119. }
  120. *vdev_id = peer->vdev->vdev_id;
  121. return CDF_STATUS_SUCCESS;
  122. }
  123. void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
  124. {
  125. struct ol_txrx_peer_t *peer = NULL;
  126. ol_txrx_pdev_handle pdev = NULL;
  127. if (sta_id >= WLAN_MAX_STA_COUNT) {
  128. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  129. "Invalid sta id passed");
  130. return NULL;
  131. }
  132. pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  133. if (!pdev) {
  134. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  135. "PDEV not found for sta_id [%d]", sta_id);
  136. return NULL;
  137. }
  138. peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
  139. if (!peer) {
  140. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  141. "PEER [%d] not found", sta_id);
  142. return NULL;
  143. }
  144. return peer->vdev;
  145. }
  146. ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
  147. uint8_t *peer_addr,
  148. uint8_t *peer_id)
  149. {
  150. struct ol_txrx_peer_t *peer;
  151. peer = ol_txrx_peer_find_hash_find(pdev, peer_addr, 0, 1);
  152. if (!peer)
  153. return NULL;
  154. *peer_id = peer->local_id;
  155. cdf_atomic_dec(&peer->ref_cnt);
  156. return peer;
  157. }
  158. uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer)
  159. {
  160. return peer->local_id;
  161. }
  162. ol_txrx_peer_handle
  163. ol_txrx_peer_find_by_local_id(struct ol_txrx_pdev_t *pdev,
  164. uint8_t local_peer_id)
  165. {
  166. struct ol_txrx_peer_t *peer;
  167. if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
  168. (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
  169. return NULL;
  170. }
  171. cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
  172. peer = pdev->local_peer_ids.map[local_peer_id];
  173. cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
  174. return peer;
  175. }
  176. static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
  177. {
  178. int i;
  179. /* point the freelist to the first ID */
  180. pdev->local_peer_ids.freelist = 0;
  181. /* link each ID to the next one */
  182. for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
  183. pdev->local_peer_ids.pool[i] = i + 1;
  184. pdev->local_peer_ids.map[i] = NULL;
  185. }
  186. /* link the last ID to itself, to mark the end of the list */
  187. i = OL_TXRX_NUM_LOCAL_PEER_IDS;
  188. pdev->local_peer_ids.pool[i] = i;
  189. cdf_spinlock_init(&pdev->local_peer_ids.lock);
  190. }
  191. static void
  192. ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
  193. struct ol_txrx_peer_t *peer)
  194. {
  195. int i;
  196. cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
  197. i = pdev->local_peer_ids.freelist;
  198. if (pdev->local_peer_ids.pool[i] == i) {
  199. /* the list is empty, except for the list-end marker */
  200. peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
  201. } else {
  202. /* take the head ID and advance the freelist */
  203. peer->local_id = i;
  204. pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
  205. pdev->local_peer_ids.map[i] = peer;
  206. }
  207. cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
  208. }
  209. static void
  210. ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
  211. struct ol_txrx_peer_t *peer)
  212. {
  213. int i = peer->local_id;
  214. if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
  215. (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
  216. return;
  217. }
  218. /* put this ID on the head of the freelist */
  219. cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
  220. pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
  221. pdev->local_peer_ids.freelist = i;
  222. pdev->local_peer_ids.map[i] = NULL;
  223. cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
  224. }
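/*
 * Illustration of the local peer ID freelist above: pool[] doubles as a
 * "next" array.  With, say, OL_TXRX_NUM_LOCAL_PEER_IDS == 4:
 *   after _pool_init():  freelist = 0, pool = {1, 2, 3, 4, 4}
 *                        (pool[4] links to itself as the end-of-list marker)
 *   after one _alloc():  the peer gets local_id 0, freelist = pool[0] = 1
 *   after _free() of 0:  pool[0] = 1 (old freelist head), freelist = 0
 */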
  225. static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
  226. {
  227. cdf_spinlock_destroy(&pdev->local_peer_ids.lock);
  228. }
  229. #else
  230. #define ol_txrx_local_peer_id_pool_init(pdev) /* no-op */
  231. #define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
  232. #define ol_txrx_local_peer_id_free(pdev, peer) /* no-op */
  233. #define ol_txrx_local_peer_id_cleanup(pdev) /* no-op */
  234. #endif
  235. #ifdef WLAN_FEATURE_FASTPATH
  236. /**
  237. * setup_fastpath_ce_handles() - update pdev with ce_handle for fastpath use
  238. *
  239. * @osc: pointer to HIF context
  240. * @pdev: pointer to ol pdev
  241. *
  242. * Return: void
  243. */
  244. static inline void
  245. setup_fastpath_ce_handles(struct ol_softc *osc, struct ol_txrx_pdev_t *pdev)
  246. {
  247. /*
  248. * Before the HTT attach, set up the CE handles
  249. * CE handles are (struct CE_state *)
  250. * This is only required in the fast path
  251. */
  252. pdev->ce_tx_hdl = (struct CE_handle *)
  253. osc->ce_id_to_state[CE_HTT_H2T_MSG];
  254. }
  255. #else /* not WLAN_FEATURE_FASTPATH */
  256. static inline void
  257. setup_fastpath_ce_handles(struct ol_softc *osc, struct ol_txrx_pdev_t *pdev)
  258. {
  259. }
  260. #endif /* WLAN_FEATURE_FASTPATH */
  261. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  262. /**
  263. * ol_tx_set_desc_global_pool_size() - set global pool size
  264. * @num_msdu_desc: total number of descriptors
  265. *
  266. * Return: none
  267. */
  268. void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
  269. {
  270. struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  271. if (!pdev) {
  272. cdf_print("%s: pdev is NULL\n", __func__);
  273. return;
  274. }
  275. pdev->num_msdu_desc = num_msdu_desc;
  276. if (!ol_tx_get_is_mgmt_over_wmi_enabled())
  277. pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
  278. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global pool size: %d\n",
  279. pdev->num_msdu_desc);
  280. return;
  281. }
  282. /**
  283. * ol_tx_get_desc_global_pool_size() - get global pool size
  284. * @pdev: pdev handle
  285. *
  286. * Return: global pool size
  287. */
  288. static inline
  289. uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
  290. {
  291. return pdev->num_msdu_desc;
  292. }
  293. /**
  294. * ol_tx_get_total_free_desc() - get total free descriptors
  295. * @pdev: pdev handle
  296. *
  297. * Return: total free descriptors
  298. */
  299. static inline
  300. uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
  301. {
  302. struct ol_tx_flow_pool_t *pool = NULL;
  303. uint32_t free_desc;
  304. free_desc = pdev->tx_desc.num_free;
  305. cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
  306. TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
  307. flow_pool_list_elem) {
  308. cdf_spin_lock_bh(&pool->flow_pool_lock);
  309. free_desc += pool->avail_desc;
  310. cdf_spin_unlock_bh(&pool->flow_pool_lock);
  311. }
  312. cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
  313. return free_desc;
  314. }
  315. #else
  316. /**
  317. * ol_tx_get_desc_global_pool_size() - get global pool size
  318. * @pdev: pdev handle
  319. *
  320. * Return: global pool size
  321. */
  322. static inline
  323. uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
  324. {
  325. return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
  326. }
  327. /**
  328. * ol_tx_get_total_free_desc() - get total free descriptors
  329. * @pdev: pdev handle
  330. *
  331. * Return: total free descriptors
  332. */
  333. static inline
  334. uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
  335. {
  336. return pdev->tx_desc.num_free;
  337. }
  338. #endif
  339. /**
  340. * ol_txrx_pdev_alloc() - allocate txrx pdev
  341. * @ctrl_pdev: cfg pdev
  342. * @htc_pdev: HTC pdev
  343. * @osdev: os dev
  344. *
  345. * Return: txrx pdev handle
  346. * NULL for failure
  347. */
  348. ol_txrx_pdev_handle
  349. ol_txrx_pdev_alloc(ol_pdev_handle ctrl_pdev,
  350. HTC_HANDLE htc_pdev, cdf_device_t osdev)
  351. {
  352. struct ol_txrx_pdev_t *pdev;
  353. int i;
  354. pdev = cdf_mem_malloc(sizeof(*pdev));
  355. if (!pdev)
  356. goto fail0;
  357. cdf_mem_zero(pdev, sizeof(*pdev));
  358. pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
  359. /* store provided params */
  360. pdev->ctrl_pdev = ctrl_pdev;
  361. pdev->osdev = osdev;
  362. for (i = 0; i < htt_num_sec_types; i++)
  363. pdev->sec_types[i] = (enum ol_sec_type)i;
  364. TXRX_STATS_INIT(pdev);
  365. TAILQ_INIT(&pdev->vdev_list);
  366. /* do initial set up of the peer ID -> peer object lookup map */
  367. if (ol_txrx_peer_find_attach(pdev))
  368. goto fail1;
  369. pdev->htt_pdev =
  370. htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
  371. if (!pdev->htt_pdev)
  372. goto fail2;
  373. return pdev;
  374. fail2:
  375. ol_txrx_peer_find_detach(pdev);
  376. fail1:
  377. cdf_mem_free(pdev);
  378. fail0:
  379. return NULL;
  380. }
  381. /**
  382. * ol_txrx_pdev_attach() - attach txrx pdev
  383. * @pdev: txrx pdev
  384. *
  385. * Return: 0 for success
  386. */
  387. int
  388. ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
  389. {
  390. uint16_t i;
  391. uint16_t fail_idx = 0;
  392. int ret = 0;
  393. uint16_t desc_pool_size;
  394. struct ol_softc *osc = cds_get_context(CDF_MODULE_ID_HIF);
  395. uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
  396. union ol_tx_desc_list_elem_t *c_element;
  397. unsigned int sig_bit;
  398. uint16_t desc_per_page;
  399. if (!osc) {
  400. ret = -EINVAL;
  401. goto ol_attach_fail;
  402. }
  403. /*
  404. * For LL, limit the number of host's tx descriptors to match
  405. * the number of target FW tx descriptors.
  406. * This simplifies the FW, by ensuring the host will never
  407. * download more tx descriptors than the target has space for.
  408. * The FW will drop/free low-priority tx descriptors when it
  409. * starts to run low, so that in theory the host should never
  410. * run out of tx descriptors.
  411. */
  412. /* initialize the counter of the target's tx buffer availability */
  413. cdf_atomic_init(&pdev->target_tx_credit);
  414. cdf_atomic_init(&pdev->orig_target_tx_credit);
  415. /*
  416. * LL - initialize the target credit ourselves.
  417. * HL - wait for a HTT target credit initialization during htt_attach.
  418. */
  419. cdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
  420. &pdev->target_tx_credit);
  421. desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
  422. setup_fastpath_ce_handles(osc, pdev);
  423. ret = htt_attach(pdev->htt_pdev, desc_pool_size);
  424. if (ret)
  425. goto ol_attach_fail;
  426. /* Update CE's pkt download length */
  427. ce_pkt_dl_len_set((void *)osc, htt_pkt_dl_len_get(pdev->htt_pdev));
  428. /* Attach micro controller data path offload resource */
  429. if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
  430. if (htt_ipa_uc_attach(pdev->htt_pdev))
  431. goto uc_attach_fail;
  432. /* Calculate the per-element reserved size, rounded up to a power of 2 */
  433. pdev->tx_desc.desc_reserved_size = cdf_get_pwr2(desc_element_size);
  434. cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
  435. pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
  436. if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
  437. (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
  438. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  439. "Page alloc fail");
  440. goto page_alloc_fail;
  441. }
  442. desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
  443. pdev->tx_desc.offset_filter = desc_per_page - 1;
  444. /* Calculate page divider to find page number */
  445. sig_bit = 0;
  446. while (desc_per_page) {
  447. sig_bit++;
  448. desc_per_page = desc_per_page >> 1;
  449. }
  450. pdev->tx_desc.page_divider = (sig_bit - 1);
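/*
 * Illustration: assuming num_element_per_page is a power of two, e.g. 16
 * descriptors per page, the values computed above are offset_filter = 0xF
 * and page_divider = 4, so a descriptor index i splits into a page number
 * (i >> page_divider) and an offset within that page (i & offset_filter).
 * This is presumably how ol_tx_desc_find() locates a descriptor.
 */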
  451. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  452. "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
  453. pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
  454. desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
  455. pdev->tx_desc.desc_pages.num_element_per_page);
  456. /*
  457. * Each SW tx desc (used only within the tx datapath SW) has a
  458. * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
  459. * Go ahead and allocate the HTT tx desc and link it with the SW tx
  460. * desc now, to avoid doing it during time-critical transmit.
  461. */
  462. pdev->tx_desc.pool_size = desc_pool_size;
  463. pdev->tx_desc.freelist =
  464. (union ol_tx_desc_list_elem_t *)
  465. (*pdev->tx_desc.desc_pages.cacheable_pages);
  466. c_element = pdev->tx_desc.freelist;
  467. for (i = 0; i < desc_pool_size; i++) {
  468. void *htt_tx_desc;
  469. void *htt_frag_desc = NULL;
  470. uint32_t frag_paddr_lo = 0;
  471. uint32_t paddr_lo;
  472. if (i == (desc_pool_size - 1))
  473. c_element->next = NULL;
  474. else
  475. c_element->next = (union ol_tx_desc_list_elem_t *)
  476. ol_tx_desc_find(pdev, i + 1);
  477. htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr_lo, i);
  478. if (!htt_tx_desc) {
  479. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
  480. "%s: failed to alloc HTT tx desc (%d of %d)",
  481. __func__, i, desc_pool_size);
  482. fail_idx = i;
  483. goto desc_alloc_fail;
  484. }
  485. c_element->tx_desc.htt_tx_desc = htt_tx_desc;
  486. c_element->tx_desc.htt_tx_desc_paddr = paddr_lo;
  487. ret = htt_tx_frag_alloc(pdev->htt_pdev,
  488. i, &frag_paddr_lo, &htt_frag_desc);
  489. if (ret) {
  490. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  491. "%s: failed to alloc HTT frag dsc (%d/%d)",
  492. __func__, i, desc_pool_size);
  493. /* Is there a leak here, is this handling correct? */
  494. fail_idx = i;
  495. goto desc_alloc_fail;
  496. }
  497. if (!ret && htt_frag_desc) {
  498. /* Initialize the first 6 words (TSO flags)
  499. of the frag descriptor */
  500. memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
  501. c_element->tx_desc.htt_frag_desc = htt_frag_desc;
  502. c_element->tx_desc.htt_frag_desc_paddr = frag_paddr_lo;
  503. }
  504. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  505. "%s:%d - %d FRAG VA 0x%p FRAG PA 0x%x",
  506. __func__, __LINE__, i,
  507. c_element->tx_desc.htt_frag_desc,
  508. c_element->tx_desc.htt_frag_desc_paddr);
  509. #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
  510. c_element->tx_desc.pkt_type = 0xff;
  511. #ifdef QCA_COMPUTE_TX_DELAY
  512. c_element->tx_desc.entry_timestamp_ticks =
  513. 0xffffffff;
  514. #endif
  515. #endif
  516. c_element->tx_desc.id = i;
  517. cdf_atomic_init(&c_element->tx_desc.ref_cnt);
  518. c_element = c_element->next;
  519. fail_idx = i;
  520. }
  521. /* link SW tx descs into a freelist */
  522. pdev->tx_desc.num_free = desc_pool_size;
  523. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  524. "%s first tx_desc:0x%p Last tx desc:0x%p\n", __func__,
  525. (uint32_t *) pdev->tx_desc.freelist,
  526. (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
  527. /* check what format of frames are expected to be delivered by the OS */
  528. pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
  529. if (pdev->frame_format == wlan_frm_fmt_native_wifi)
  530. pdev->htt_pkt_type = htt_pkt_type_native_wifi;
  531. else if (pdev->frame_format == wlan_frm_fmt_802_3) {
  532. if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
  533. pdev->htt_pkt_type = htt_pkt_type_eth2;
  534. else
  535. pdev->htt_pkt_type = htt_pkt_type_ethernet;
  536. } else {
  537. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  538. "%s Invalid standard frame type: %d",
  539. __func__, pdev->frame_format);
  540. goto control_init_fail;
  541. }
  542. /* setup the global rx defrag waitlist */
  543. TAILQ_INIT(&pdev->rx.defrag.waitlist);
  544. /* configure where defrag timeout and duplicate detection is handled */
  545. pdev->rx.flags.defrag_timeout_check =
  546. pdev->rx.flags.dup_check =
  547. ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
  548. #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
  549. /* Need to revisit this part. Currently, hardcoded to Riva's caps */
  550. pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
  551. pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
  552. /*
  553. * The Riva HW de-aggregation doesn't have the capability to generate the
  554. * 802.11 header for non-first subframes of an A-MSDU.
  555. */
  556. pdev->sw_subfrm_hdr_recovery_enable = 1;
  557. /*
  558. * The Riva HW doesn't have the capability to set Protected Frame bit
  559. * in the MAC header for encrypted data frame.
  560. */
  561. pdev->sw_pf_proc_enable = 1;
  562. if (pdev->frame_format == wlan_frm_fmt_802_3) {
  563. /* sw llc process is only needed in
  564. 802.3 to 802.11 transform case */
  565. pdev->sw_tx_llc_proc_enable = 1;
  566. pdev->sw_rx_llc_proc_enable = 1;
  567. } else {
  568. pdev->sw_tx_llc_proc_enable = 0;
  569. pdev->sw_rx_llc_proc_enable = 0;
  570. }
  571. switch (pdev->frame_format) {
  572. case wlan_frm_fmt_raw:
  573. pdev->sw_tx_encap =
  574. pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
  575. ? 0 : 1;
  576. pdev->sw_rx_decap =
  577. pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
  578. ? 0 : 1;
  579. break;
  580. case wlan_frm_fmt_native_wifi:
  581. pdev->sw_tx_encap =
  582. pdev->
  583. target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
  584. ? 0 : 1;
  585. pdev->sw_rx_decap =
  586. pdev->
  587. target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
  588. ? 0 : 1;
  589. break;
  590. case wlan_frm_fmt_802_3:
  591. pdev->sw_tx_encap =
  592. pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
  593. ? 0 : 1;
  594. pdev->sw_rx_decap =
  595. pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
  596. ? 0 : 1;
  597. break;
  598. default:
  599. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  600. "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
  601. pdev->frame_format,
  602. pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
  603. goto control_init_fail;
  604. }
  605. #endif
  606. /*
  607. * Determine what rx processing steps are done within the host.
  608. * Possibilities:
  609. * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
  610. * (This is unlikely; even if the target is doing rx->tx forwarding,
  611. * the host should be doing rx->tx forwarding too, as a back up for
  612. * the target's rx->tx forwarding, in case the target runs short on
  613. * memory, and can't store rx->tx frames that are waiting for
  614. * missing prior rx frames to arrive.)
  615. * 2. Just rx -> tx forwarding.
  616. * This is the typical configuration for HL, and a likely
  617. * configuration for LL STA or small APs (e.g. retail APs).
  618. * 3. Both PN check and rx -> tx forwarding.
  619. * This is the typical configuration for large LL APs.
  620. * Host-side PN check without rx->tx forwarding is not a valid
  621. * configuration, since the PN check needs to be done prior to
  622. * the rx->tx forwarding.
  623. */
  624. if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
  625. /* PN check, rx->tx forwarding and rx reorder are done by
  626. the target */
  627. if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
  628. pdev->rx_opt_proc = ol_rx_in_order_deliver;
  629. else
  630. pdev->rx_opt_proc = ol_rx_fwd_check;
  631. } else {
  632. if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
  633. if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
  634. /*
  635. * PN check done on host,
  636. * rx->tx forwarding not done at all.
  637. */
  638. pdev->rx_opt_proc = ol_rx_pn_check_only;
  639. } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
  640. /*
  641. * Both PN check and rx->tx forwarding done
  642. * on host.
  643. */
  644. pdev->rx_opt_proc = ol_rx_pn_check;
  645. } else {
  646. #define TRACESTR01 "invalid config: if rx PN check is on the host, "\
  647. "rx->tx forwarding check needs to also be on the host"
  648. CDF_TRACE(CDF_MODULE_ID_TXRX,
  649. CDF_TRACE_LEVEL_ERROR,
  650. "%s: %s", __func__, TRACESTR01);
  651. #undef TRACESTR01
  652. goto control_init_fail;
  653. }
  654. } else {
  655. /* PN check done on target */
  656. if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
  657. ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
  658. /*
  659. * rx->tx forwarding done on host (possibly as
  660. * back-up for target-side primary rx->tx
  661. * forwarding)
  662. */
  663. pdev->rx_opt_proc = ol_rx_fwd_check;
  664. } else {
  665. /* rx->tx forwarding either done in target,
  666. * or not done at all */
  667. pdev->rx_opt_proc = ol_rx_deliver;
  668. }
  669. }
  670. }
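/*
 * Summary of the rx_opt_proc selection above:
 *   full reorder offload, rx->tx fwd disabled   -> ol_rx_in_order_deliver
 *   full reorder offload, rx->tx fwd enabled    -> ol_rx_fwd_check
 *   host PN check, rx->tx fwd disabled          -> ol_rx_pn_check_only
 *   host PN check, host rx->tx fwd check        -> ol_rx_pn_check
 *   target PN check, host rx->tx fwd check      -> ol_rx_fwd_check
 *   target PN check, no host rx->tx fwd check   -> ol_rx_deliver
 */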
  671. /* initialize mutexes for tx desc alloc and peer lookup */
  672. cdf_spinlock_init(&pdev->tx_mutex);
  673. cdf_spinlock_init(&pdev->peer_ref_mutex);
  674. cdf_spinlock_init(&pdev->rx.mutex);
  675. cdf_spinlock_init(&pdev->last_real_peer_mutex);
  676. OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
  677. if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
  678. goto reorder_trace_attach_fail;
  679. if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK)
  680. goto pn_trace_attach_fail;
  681. #ifdef PERE_IP_HDR_ALIGNMENT_WAR
  682. pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
  683. #endif
  684. /*
  685. * WDI event attach
  686. */
  687. wdi_event_attach(pdev);
  688. /*
  689. * Initialize rx PN check characteristics for different security types.
  690. */
  691. cdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
  692. /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
  693. pdev->rx_pn[htt_sec_type_tkip].len =
  694. pdev->rx_pn[htt_sec_type_tkip_nomic].len =
  695. pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
  696. pdev->rx_pn[htt_sec_type_tkip].cmp =
  697. pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
  698. pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
  699. /* WAPI: 128-bit PN */
  700. pdev->rx_pn[htt_sec_type_wapi].len = 128;
  701. pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
  702. OL_RX_REORDER_TIMEOUT_INIT(pdev);
  703. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Created pdev %p\n", pdev);
  704. pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
  705. #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
  706. #define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
  707. /* #if 1 -- TODO: clean this up */
  708. #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
  709. /* avg = 100% * new + 0% * old */ \
  710. (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
  711. /*
  712. #else
  713. #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
  714. //avg = 25% * new + 25% * old
  715. (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
  716. #endif
  717. */
  718. pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
  719. pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
  720. #endif
  721. ol_txrx_local_peer_id_pool_init(pdev);
  722. pdev->cfg.ll_pause_txq_limit =
  723. ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
  724. #ifdef QCA_COMPUTE_TX_DELAY
  725. cdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
  726. cdf_spinlock_init(&pdev->tx_delay.mutex);
  727. /* initialize compute interval with 5 seconds (ESE default) */
  728. pdev->tx_delay.avg_period_ticks = cdf_system_msecs_to_ticks(5000);
  729. {
  730. uint32_t bin_width_1000ticks;
  731. bin_width_1000ticks =
  732. cdf_system_msecs_to_ticks
  733. (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
  734. * 1000);
  735. /*
  736. * Compute a factor and shift that together are equal to the
  737. * inverse of the bin_width time, so that rather than dividing
  738. * by the bin width time, approximately the same result can be
  739. * obtained much more efficiently by a multiply + shift.
  740. * multiply_factor >> shift = 1 / bin_width_time, so
  741. * multiply_factor = (1 << shift) / bin_width_time.
  742. *
  743. * Pick the shift semi-arbitrarily.
  744. * If we knew statically what the bin_width would be, we could
  745. * choose a shift that minimizes the error.
  746. * Since the bin_width is determined dynamically, simply use a
  747. * shift that is about half of the uint32_t size. This should
  748. * result in a relatively large multiplier value, which
  749. * minimizes error from rounding the multiplier to an integer.
  750. * The rounding error only becomes significant if the tick units
  751. * are on the order of 1 microsecond. In most systems, it is
  752. * expected that the tick units will be relatively low-res,
  753. * on the order of 1 millisecond. In such systems the rounding
  754. * error is negligible.
  755. * It would be more accurate to dynamically try out different
  756. * shifts and choose the one that results in the smallest
  757. * rounding error, but that extra level of fidelity is
  758. * not needed.
  759. */
  760. pdev->tx_delay.hist_internal_bin_width_shift = 16;
  761. pdev->tx_delay.hist_internal_bin_width_mult =
  762. ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
  763. 1000 + (bin_width_1000ticks >> 1)) /
  764. bin_width_1000ticks;
  765. }
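/*
 * Worked example, assuming a 1 ms tick and a 10 ms internal bin width:
 * bin_width_1000ticks = 10000, so
 *   hist_internal_bin_width_mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
 * and, per the multiply + shift scheme described above, a 30-tick delay
 * maps to bin (30 * 6554) >> 16 = 3, matching division by the 10-tick
 * bin width.
 */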
  766. #endif /* QCA_COMPUTE_TX_DELAY */
  767. /* Thermal Mitigation */
  768. ol_tx_throttle_init(pdev);
  769. ol_tso_seg_list_init(pdev, desc_pool_size);
  770. ol_tx_register_flow_control(pdev);
  771. return 0; /* success */
  772. pn_trace_attach_fail:
  773. OL_RX_REORDER_TRACE_DETACH(pdev);
  774. reorder_trace_attach_fail:
  775. cdf_spinlock_destroy(&pdev->tx_mutex);
  776. cdf_spinlock_destroy(&pdev->peer_ref_mutex);
  777. cdf_spinlock_destroy(&pdev->rx.mutex);
  778. cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
  779. OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
  780. control_init_fail:
  781. desc_alloc_fail:
  782. for (i = 0; i < fail_idx; i++)
  783. htt_tx_desc_free(pdev->htt_pdev,
  784. (ol_tx_desc_find(pdev, i))->htt_tx_desc);
  785. cdf_mem_multi_pages_free(pdev->osdev,
  786. &pdev->tx_desc.desc_pages, 0, true);
  787. page_alloc_fail:
  788. if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
  789. htt_ipa_uc_detach(pdev->htt_pdev);
  790. uc_attach_fail:
  791. htt_detach(pdev->htt_pdev);
  792. ol_attach_fail:
  793. return ret; /* fail */
  794. }
  795. A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle pdev)
  796. {
  797. return htt_attach_target(pdev->htt_pdev);
  798. }
  799. void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
  800. {
  801. int i;
  802. /* checking to ensure txrx pdev structure is not NULL */
  803. if (!pdev) {
  804. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "NULL pdev passed to %s\n", __func__);
  805. return;
  806. }
  807. /* preconditions */
  808. TXRX_ASSERT2(pdev);
  809. /* check that the pdev has no vdevs allocated */
  810. TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
  811. OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
  812. #ifdef QCA_SUPPORT_TX_THROTTLE
  813. /* Thermal Mitigation */
  814. cdf_softirq_timer_cancel(&pdev->tx_throttle.phase_timer);
  815. cdf_softirq_timer_free(&pdev->tx_throttle.phase_timer);
  816. #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
  817. cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
  818. cdf_softirq_timer_free(&pdev->tx_throttle.tx_timer);
  819. #endif
  820. #endif
  821. ol_tso_seg_list_deinit(pdev);
  822. ol_tx_deregister_flow_control(pdev);
  823. if (force) {
  824. /*
  825. * The assertion above confirms that all vdevs within this pdev
  826. * were detached. However, they may not have actually been
  827. * deleted.
  828. * If the vdev had peers which never received a PEER_UNMAP msg
  829. * from the target, then there are still zombie peer objects,
  830. * and the vdev parents of the zombie peers are also zombies,
  831. * hanging around until their final peer gets deleted.
  832. * Go through the peer hash table and delete any peers left.
  833. * As a side effect, this will complete the deletion of any
  834. * vdevs that are waiting for their peers to finish deletion.
  835. */
  836. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Force delete for pdev %p\n",
  837. pdev);
  838. ol_txrx_peer_find_hash_erase(pdev);
  839. }
  840. /* Stop the communication between HTT and target at first */
  841. htt_detach_target(pdev->htt_pdev);
  842. for (i = 0; i < pdev->tx_desc.pool_size; i++) {
  843. void *htt_tx_desc;
  844. struct ol_tx_desc_t *tx_desc;
  845. tx_desc = ol_tx_desc_find(pdev, i);
  846. /*
  847. * Confirm that each tx descriptor is "empty", i.e. it has
  848. * no tx frame attached.
  849. * In particular, check that there are no frames that have
  850. * been given to the target to transmit, for which the
  851. * target has never provided a response.
  852. */
  853. if (cdf_atomic_read(&tx_desc->ref_cnt)) {
  854. TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
  855. "Warning: freeing tx frame (no compltn)\n");
  856. ol_tx_desc_frame_free_nonstd(pdev,
  857. tx_desc, 1);
  858. }
  859. htt_tx_desc = tx_desc->htt_tx_desc;
  860. htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
  861. }
  862. cdf_mem_multi_pages_free(pdev->osdev,
  863. &pdev->tx_desc.desc_pages, 0, true);
  864. pdev->tx_desc.freelist = NULL;
  865. /* Detach micro controller data path offload resource */
  866. if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
  867. htt_ipa_uc_detach(pdev->htt_pdev);
  868. htt_detach(pdev->htt_pdev);
  869. htt_pdev_free(pdev->htt_pdev);
  870. ol_txrx_peer_find_detach(pdev);
  871. cdf_spinlock_destroy(&pdev->tx_mutex);
  872. cdf_spinlock_destroy(&pdev->peer_ref_mutex);
  873. cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
  874. cdf_spinlock_destroy(&pdev->rx.mutex);
  875. #ifdef QCA_SUPPORT_TX_THROTTLE
  876. /* Thermal Mitigation */
  877. cdf_spinlock_destroy(&pdev->tx_throttle.mutex);
  878. #endif
  879. OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
  880. OL_RX_REORDER_TRACE_DETACH(pdev);
  881. OL_RX_PN_TRACE_DETACH(pdev);
  882. /*
  883. * WDI event detach
  884. */
  885. wdi_event_detach(pdev);
  886. ol_txrx_local_peer_id_cleanup(pdev);
  887. #ifdef QCA_COMPUTE_TX_DELAY
  888. cdf_spinlock_destroy(&pdev->tx_delay.mutex);
  889. #endif
  890. }
  891. ol_txrx_vdev_handle
  892. ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
  893. uint8_t *vdev_mac_addr,
  894. uint8_t vdev_id, enum wlan_op_mode op_mode)
  895. {
  896. struct ol_txrx_vdev_t *vdev;
  897. /* preconditions */
  898. TXRX_ASSERT2(pdev);
  899. TXRX_ASSERT2(vdev_mac_addr);
  900. vdev = cdf_mem_malloc(sizeof(*vdev));
  901. if (!vdev)
  902. return NULL; /* failure */
  903. /* store provided params */
  904. vdev->pdev = pdev;
  905. vdev->vdev_id = vdev_id;
  906. vdev->opmode = op_mode;
  907. vdev->delete.pending = 0;
  908. vdev->safemode = 0;
  909. vdev->drop_unenc = 1;
  910. vdev->num_filters = 0;
  911. cdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
  912. OL_TXRX_MAC_ADDR_LEN);
  913. TAILQ_INIT(&vdev->peer_list);
  914. vdev->last_real_peer = NULL;
  915. #ifdef QCA_IBSS_SUPPORT
  916. vdev->ibss_peer_num = 0;
  917. vdev->ibss_peer_heart_beat_timer = 0;
  918. #endif
  919. cdf_spinlock_init(&vdev->ll_pause.mutex);
  920. vdev->ll_pause.paused_reason = 0;
  921. vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
  922. vdev->ll_pause.txq.depth = 0;
  923. cdf_softirq_timer_init(pdev->osdev,
  924. &vdev->ll_pause.timer,
  925. ol_tx_vdev_ll_pause_queue_send, vdev,
  926. CDF_TIMER_TYPE_SW);
  927. cdf_atomic_init(&vdev->os_q_paused);
  928. cdf_atomic_set(&vdev->os_q_paused, 0);
  929. vdev->tx_fl_lwm = 0;
  930. vdev->tx_fl_hwm = 0;
  931. vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
  932. cdf_spinlock_init(&vdev->flow_control_lock);
  933. vdev->osif_flow_control_cb = NULL;
  934. vdev->osif_fc_ctx = NULL;
  935. /* Default MAX Q depth for every VDEV */
  936. vdev->ll_pause.max_q_depth =
  937. ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
  938. /* add this vdev into the pdev's list */
  939. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  940. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  941. "Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
  942. vdev,
  943. vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
  944. vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
  945. vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
  946. /*
  947. * We've verified that htt_op_mode == wlan_op_mode,
  948. * so no translation is needed.
  949. */
  950. htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
  951. return vdev;
  952. }
  953. void ol_txrx_osif_vdev_register(ol_txrx_vdev_handle vdev,
  954. void *osif_vdev,
  955. struct ol_txrx_osif_ops *txrx_ops)
  956. {
  957. vdev->osif_dev = osif_vdev;
  958. txrx_ops->tx.std = vdev->tx = OL_TX_LL;
  959. txrx_ops->tx.non_std = ol_tx_non_std_ll;
  960. }
  961. void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
  962. {
  963. return;
  964. }
  965. void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
  966. {
  967. vdev->safemode = val;
  968. }
  969. void
  970. ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
  971. void *filters, uint32_t num)
  972. {
  973. cdf_mem_copy(vdev->privacy_filters, filters,
  974. num * sizeof(struct privacy_exemption));
  975. vdev->num_filters = num;
  976. }
  977. void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
  978. {
  979. vdev->drop_unenc = val;
  980. }
  981. void
  982. ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
  983. ol_txrx_vdev_delete_cb callback, void *context)
  984. {
  985. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  986. /* preconditions */
  987. TXRX_ASSERT2(vdev);
  988. cdf_spin_lock_bh(&vdev->ll_pause.mutex);
  989. cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
  990. cdf_softirq_timer_free(&vdev->ll_pause.timer);
  991. vdev->ll_pause.is_q_timer_on = false;
  992. while (vdev->ll_pause.txq.head) {
  993. cdf_nbuf_t next = cdf_nbuf_next(vdev->ll_pause.txq.head);
  994. cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
  995. cdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
  996. CDF_DMA_TO_DEVICE);
  997. cdf_nbuf_tx_free(vdev->ll_pause.txq.head, NBUF_PKT_ERROR);
  998. vdev->ll_pause.txq.head = next;
  999. }
  1000. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  1001. cdf_spinlock_destroy(&vdev->ll_pause.mutex);
  1002. cdf_spin_lock_bh(&vdev->flow_control_lock);
  1003. vdev->osif_flow_control_cb = NULL;
  1004. vdev->osif_fc_ctx = NULL;
  1005. cdf_spin_unlock_bh(&vdev->flow_control_lock);
  1006. cdf_spinlock_destroy(&vdev->flow_control_lock);
  1007. /* remove the vdev from its parent pdev's list */
  1008. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  1009. /*
  1010. * Use peer_ref_mutex while accessing peer_list, in case
  1011. * a peer is in the process of being removed from the list.
  1012. */
  1013. cdf_spin_lock_bh(&pdev->peer_ref_mutex);
  1014. /* check that the vdev has no peers allocated */
  1015. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  1016. /* debug print - will be removed later */
  1017. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  1018. "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x)"
  1019. " until deletion finishes for all its peers\n",
  1020. __func__, vdev,
  1021. vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
  1022. vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
  1023. vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
  1024. /* indicate that the vdev needs to be deleted */
  1025. vdev->delete.pending = 1;
  1026. vdev->delete.callback = callback;
  1027. vdev->delete.context = context;
  1028. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1029. return;
  1030. }
  1031. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1032. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  1033. "%s: deleting vdev obj %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
  1034. __func__, vdev,
  1035. vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
  1036. vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
  1037. vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
  1038. htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
  1039. /*
  1040. * Doesn't matter if there are outstanding tx frames -
  1041. * they will be freed once the target sends a tx completion
  1042. * message for them.
  1043. */
  1044. cdf_mem_free(vdev);
  1045. if (callback)
  1046. callback(context);
  1047. }
  1048. /**
  1049. * ol_txrx_flush_rx_frames() - flush cached rx frames
  1050. * @peer: peer
  1051. * @drop: set flag to drop frames
  1052. *
  1053. * Return: None
  1054. */
  1055. void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
  1056. bool drop)
  1057. {
  1058. struct ol_rx_cached_buf *cache_buf;
  1059. CDF_STATUS ret;
  1060. ol_rx_callback_fp data_rx = NULL;
  1061. void *cds_ctx = cds_get_global_context();
  1062. if (cdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
  1063. cdf_atomic_dec(&peer->flush_in_progress);
  1064. return;
  1065. }
  1066. cdf_assert(cds_ctx);
  1067. cdf_spin_lock_bh(&peer->peer_info_lock);
  1068. if (peer->state >= ol_txrx_peer_state_conn)
  1069. data_rx = peer->osif_rx;
  1070. else
  1071. drop = true;
  1072. cdf_spin_unlock_bh(&peer->peer_info_lock);
  1073. cdf_spin_lock_bh(&peer->bufq_lock);
  1074. cache_buf = list_entry((&peer->cached_bufq)->next,
  1075. typeof(*cache_buf), list);
  1076. while (!list_empty(&peer->cached_bufq)) {
  1077. list_del(&cache_buf->list);
  1078. cdf_spin_unlock_bh(&peer->bufq_lock);
  1079. if (drop) {
  1080. cdf_nbuf_free(cache_buf->buf);
  1081. } else {
  1082. /* Flush the cached frames to HDD */
  1083. ret = data_rx(cds_ctx, cache_buf->buf, peer->local_id);
  1084. if (ret != CDF_STATUS_SUCCESS)
  1085. cdf_nbuf_free(cache_buf->buf);
  1086. }
  1087. cdf_mem_free(cache_buf);
  1088. cdf_spin_lock_bh(&peer->bufq_lock);
  1089. cache_buf = list_entry((&peer->cached_bufq)->next,
  1090. typeof(*cache_buf), list);
  1091. }
  1092. cdf_spin_unlock_bh(&peer->bufq_lock);
  1093. cdf_atomic_dec(&peer->flush_in_progress);
  1094. }
  1095. ol_txrx_peer_handle
  1096. ol_txrx_peer_attach(ol_txrx_pdev_handle pdev,
  1097. ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
  1098. {
  1099. struct ol_txrx_peer_t *peer;
  1100. struct ol_txrx_peer_t *temp_peer;
  1101. uint8_t i;
  1102. int differs;
  1103. bool wait_on_deletion = false;
  1104. unsigned long rc;
  1105. /* preconditions */
  1106. TXRX_ASSERT2(pdev);
  1107. TXRX_ASSERT2(vdev);
  1108. TXRX_ASSERT2(peer_mac_addr);
  1109. cdf_spin_lock_bh(&pdev->peer_ref_mutex);
  1110. /* check for a duplicate existing peer */
  1111. TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
  1112. if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
  1113. (union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
  1114. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1115. "vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exists.\n",
  1116. vdev->vdev_id,
  1117. peer_mac_addr[0], peer_mac_addr[1],
  1118. peer_mac_addr[2], peer_mac_addr[3],
  1119. peer_mac_addr[4], peer_mac_addr[5]);
  1120. if (cdf_atomic_read(&temp_peer->delete_in_progress)) {
  1121. vdev->wait_on_peer_id = temp_peer->local_id;
  1122. cdf_event_init(&vdev->wait_delete_comp);
  1123. wait_on_deletion = true;
  1124. } else {
  1125. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1126. return NULL;
  1127. }
  1128. }
  1129. }
  1130. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1131. if (wait_on_deletion) {
  1132. /* wait for peer deletion */
  1133. rc = cdf_wait_single_event(&vdev->wait_delete_comp,
  1134. cdf_system_msecs_to_ticks(PEER_DELETION_TIMEOUT));
  1135. if (!rc) {
  1136. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1137. "timed out waiting for peer(%d) deletion\n",
  1138. vdev->wait_on_peer_id);
  1139. vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
  1140. return NULL;
  1141. }
  1142. }
  1143. peer = cdf_mem_malloc(sizeof(*peer));
  1144. if (!peer)
  1145. return NULL; /* failure */
  1146. cdf_mem_zero(peer, sizeof(*peer));
  1147. /* store provided params */
  1148. peer->vdev = vdev;
  1149. cdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
  1150. OL_TXRX_MAC_ADDR_LEN);
  1151. INIT_LIST_HEAD(&peer->cached_bufq);
  1152. cdf_spin_lock_bh(&pdev->peer_ref_mutex);
  1153. /* add this peer into the vdev's list */
  1154. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  1155. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1156. /* check whether this is a real peer (peer mac addr != vdev mac addr) */
  1157. if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
  1158. vdev->last_real_peer = peer;
  1159. peer->rx_opt_proc = pdev->rx_opt_proc;
  1160. ol_rx_peer_init(pdev, peer);
  1161. /* initialize the peer_id */
  1162. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  1163. peer->peer_ids[i] = HTT_INVALID_PEER;
  1164. peer->osif_rx = NULL;
  1165. cdf_spinlock_init(&peer->peer_info_lock);
  1166. cdf_spinlock_init(&peer->bufq_lock);
  1167. cdf_atomic_init(&peer->delete_in_progress);
  1168. cdf_atomic_init(&peer->flush_in_progress);
  1169. cdf_atomic_init(&peer->ref_cnt);
  1170. /* keep one reference for attach */
  1171. cdf_atomic_inc(&peer->ref_cnt);
  1172. /* keep one reference for ol_rx_peer_map_handler */
  1173. cdf_atomic_inc(&peer->ref_cnt);
  1174. peer->valid = 1;
  1175. ol_txrx_peer_find_hash_add(pdev, peer);
  1176. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
  1177. "vdev %p created peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
  1178. vdev, peer,
  1179. peer->mac_addr.raw[0], peer->mac_addr.raw[1],
  1180. peer->mac_addr.raw[2], peer->mac_addr.raw[3],
  1181. peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
  1182. /*
1183. * For every peer MAP message, check and set bss_peer if the peer's MAC matches the vdev's MAC
  1184. */
  1185. differs =
  1186. cdf_mem_compare(peer->mac_addr.raw, vdev->mac_addr.raw,
  1187. OL_TXRX_MAC_ADDR_LEN);
  1188. if (!differs)
  1189. peer->bss_peer = 1;
  1190. /*
  1191. * The peer starts in the "disc" state while association is in progress.
  1192. * Once association completes, the peer will get updated to "auth" state
  1193. * by a call to ol_txrx_peer_state_update if the peer is in open mode,
  1194. * or else to the "conn" state. For non-open mode, the peer will
  1195. * progress to "auth" state once the authentication completes.
  1196. */
  1197. peer->state = ol_txrx_peer_state_invalid;
  1198. ol_txrx_peer_state_update(pdev, peer->mac_addr.raw,
  1199. ol_txrx_peer_state_disc);
  1200. #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
  1201. peer->rssi_dbm = HTT_RSSI_INVALID;
  1202. #endif
  1203. ol_txrx_local_peer_id_alloc(pdev, peer);
  1204. return peer;
  1205. }
  1206. /*
  1207. * Discarding tx filter - removes all data frames (disconnected state)
  1208. */
  1209. static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
  1210. {
  1211. return A_ERROR;
  1212. }
  1213. /*
1214. * Non-authentication tx filter - filters out data frames that are not
  1215. * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
  1216. * data frames (connected state)
  1217. */
  1218. static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
  1219. {
  1220. return
  1221. (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
  1222. tx_msdu_info->htt.info.ethertype ==
  1223. ETHERTYPE_WAI) ? A_OK : A_ERROR;
  1224. }
  1225. /*
  1226. * Pass-through tx filter - lets all data frames through (authenticated state)
  1227. */
  1228. static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
  1229. {
  1230. return A_OK;
  1231. }
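/**
 * ol_txrx_peer_state_update() - update a peer's authentication state
 * @pdev: data physical device object
 * @peer_mac: MAC address of the peer
 * @state: new state (disc / conn / auth)
 *
 * Selects the tx filter that matches the new state (discard, non-auth, or
 * pass-through).  When host_addba is configured and the peer becomes
 * authenticated, the peer's QoS tx queues are paused until ADDBA negotiation
 * completes, while mgmt/non-QoS queues are unpaused.  The state itself is
 * written after the pause to avoid racing with the ADDBA check in the tx
 * path.
 *
 * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_INVAL if the pdev or peer
 * cannot be found
 */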
  1232. CDF_STATUS
  1233. ol_txrx_peer_state_update(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac,
  1234. enum ol_txrx_peer_state state)
  1235. {
  1236. struct ol_txrx_peer_t *peer;
  1237. if (cdf_unlikely(!pdev)) {
  1238. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
  1239. cdf_assert(0);
  1240. return CDF_STATUS_E_INVAL;
  1241. }
  1242. peer = ol_txrx_peer_find_hash_find(pdev, peer_mac, 0, 1);
  1243. if (NULL == peer) {
  1244. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null for peer_mac"
  1245. " 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __FUNCTION__,
  1246. peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
  1247. peer_mac[4], peer_mac[5]);
  1248. return CDF_STATUS_E_INVAL;
  1249. }
  1250. /* TODO: Should we send WMI command of the connection state? */
  1251. /* avoid multiple auth state change. */
  1252. if (peer->state == state) {
  1253. #ifdef TXRX_PRINT_VERBOSE_ENABLE
  1254. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO3,
  1255. "%s: no state change, returns directly\n",
  1256. __func__);
  1257. #endif
  1258. cdf_atomic_dec(&peer->ref_cnt);
  1259. return CDF_STATUS_SUCCESS;
  1260. }
  1261. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: change from %d to %d\n",
  1262. __func__, peer->state, state);
  1263. peer->tx_filter = (state == ol_txrx_peer_state_auth)
  1264. ? ol_tx_filter_pass_thru
  1265. : ((state == ol_txrx_peer_state_conn)
  1266. ? ol_tx_filter_non_auth
  1267. : ol_tx_filter_discard);
  1268. if (peer->vdev->pdev->cfg.host_addba) {
  1269. if (state == ol_txrx_peer_state_auth) {
  1270. int tid;
  1271. /*
  1272. * Pause all regular (non-extended) TID tx queues until
  1273. * data arrives and ADDBA negotiation has completed.
  1274. */
  1275. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
  1276. "%s: pause peer and unpause mgmt/non-qos\n",
  1277. __func__);
  1278. ol_txrx_peer_pause(peer); /* pause all tx queues */
  1279. /* unpause mgmt and non-QoS tx queues */
  1280. for (tid = OL_TX_NUM_QOS_TIDS;
  1281. tid < OL_TX_NUM_TIDS; tid++)
  1282. ol_txrx_peer_tid_unpause(peer, tid);
  1283. }
  1284. }
  1285. cdf_atomic_dec(&peer->ref_cnt);
1286. /* Set the state after the pause to avoid the race condition
1287. with the ADDBA check in the tx path */
  1288. peer->state = state;
  1289. return CDF_STATUS_SUCCESS;
  1290. }
  1291. void
  1292. ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
  1293. {
  1294. peer->keyinstalled = val;
  1295. }
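/**
 * ol_txrx_peer_update() - update a single attribute of a txrx peer
 * @vdev: virtual device the peer belongs to
 * @peer_mac: MAC address of the peer
 * @param: new value for the selected attribute
 * @select: which attribute to update (qos_capable, uapsd mask, or security)
 *
 * Looks up the peer by MAC address, applies the selected update (pushing
 * qos/uapsd changes down to the HTT layer), and releases the lookup
 * reference.
 *
 * Return: none
 */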
  1296. void
  1297. ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
  1298. uint8_t *peer_mac,
  1299. union ol_txrx_peer_update_param_t *param,
  1300. enum ol_txrx_peer_update_select_t select)
  1301. {
  1302. struct ol_txrx_peer_t *peer;
  1303. peer = ol_txrx_peer_find_hash_find(vdev->pdev, peer_mac, 0, 1);
  1304. if (!peer) {
  1305. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null",
  1306. __func__);
  1307. return;
  1308. }
  1309. switch (select) {
  1310. case ol_txrx_peer_update_qos_capable:
  1311. {
1312. /* save qos_capable here in the txrx peer;
1313. * it is saved again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
  1314. */
  1315. peer->qos_capable = param->qos_capable;
  1316. /*
  1317. * The following function call assumes that the peer has a
  1318. * single ID. This is currently true, and
  1319. * is expected to remain true.
  1320. */
  1321. htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
  1322. peer->peer_ids[0],
  1323. peer->qos_capable);
  1324. break;
  1325. }
  1326. case ol_txrx_peer_update_uapsdMask:
  1327. {
  1328. peer->uapsd_mask = param->uapsd_mask;
  1329. htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
  1330. peer->peer_ids[0],
  1331. peer->uapsd_mask);
  1332. break;
  1333. }
  1334. case ol_txrx_peer_update_peer_security:
  1335. {
  1336. enum ol_sec_type sec_type = param->sec_type;
  1337. enum htt_sec_type peer_sec_type = htt_sec_type_none;
  1338. switch (sec_type) {
  1339. case ol_sec_type_none:
  1340. peer_sec_type = htt_sec_type_none;
  1341. break;
  1342. case ol_sec_type_wep128:
  1343. peer_sec_type = htt_sec_type_wep128;
  1344. break;
  1345. case ol_sec_type_wep104:
  1346. peer_sec_type = htt_sec_type_wep104;
  1347. break;
  1348. case ol_sec_type_wep40:
  1349. peer_sec_type = htt_sec_type_wep40;
  1350. break;
  1351. case ol_sec_type_tkip:
  1352. peer_sec_type = htt_sec_type_tkip;
  1353. break;
  1354. case ol_sec_type_tkip_nomic:
  1355. peer_sec_type = htt_sec_type_tkip_nomic;
  1356. break;
  1357. case ol_sec_type_aes_ccmp:
  1358. peer_sec_type = htt_sec_type_aes_ccmp;
  1359. break;
  1360. case ol_sec_type_wapi:
  1361. peer_sec_type = htt_sec_type_wapi;
  1362. break;
  1363. default:
  1364. peer_sec_type = htt_sec_type_none;
  1365. break;
  1366. }
  1367. peer->security[txrx_sec_ucast].sec_type =
  1368. peer->security[txrx_sec_mcast].sec_type =
  1369. peer_sec_type;
  1370. break;
  1371. }
  1372. default:
  1373. {
  1374. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  1375. "ERROR: unknown param %d in %s", select,
  1376. __func__);
  1377. break;
  1378. }
  1379. }
  1380. cdf_atomic_dec(&peer->ref_cnt);
  1381. }
  1382. uint8_t
  1383. ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
  1384. {
  1385. struct ol_txrx_peer_t *peer;
  1386. peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
  1387. if (peer)
  1388. return peer->uapsd_mask;
  1389. return 0;
  1390. }
  1391. uint8_t
  1392. ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
  1393. {
  1394. struct ol_txrx_peer_t *peer_t =
  1395. ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
  1396. if (peer_t != NULL)
  1397. return peer_t->qos_capable;
  1398. return 0;
  1399. }
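/**
 * ol_txrx_peer_unref_delete() - release one peer reference; free on last
 * @peer: peer object whose reference is being released
 *
 * Decrements the peer reference count under pdev->peer_ref_mutex.  When the
 * count reaches zero the peer is removed from the hash table and the vdev's
 * peer list, its rx reorder state is cleaned up, any pending vdev deletion
 * is completed if this was the vdev's last peer, and the peer memory is
 * freed.
 *
 * Return: none
 */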
  1400. void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
  1401. {
  1402. struct ol_txrx_vdev_t *vdev;
  1403. struct ol_txrx_pdev_t *pdev;
  1404. int i;
  1405. /* preconditions */
  1406. TXRX_ASSERT2(peer);
  1407. vdev = peer->vdev;
  1408. if (NULL == vdev) {
  1409. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  1410. "The vdev is not present anymore\n");
  1411. return;
  1412. }
  1413. pdev = vdev->pdev;
  1414. if (NULL == pdev) {
  1415. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  1416. "The pdev is not present anymore\n");
  1417. return;
  1418. }
  1419. /*
1420. * Check the reference count before deleting the peer, since this
1421. * function has been observed to be re-entered, which can lead to
1422. * a deadlock.
  1423. * (A double-free should never happen, so assert if it does.)
  1424. */
  1425. if (0 == cdf_atomic_read(&(peer->ref_cnt))) {
  1426. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1427. "The Peer is not present anymore\n");
  1428. cdf_assert(0);
  1429. return;
  1430. }
  1431. /*
  1432. * Hold the lock all the way from checking if the peer ref count
  1433. * is zero until the peer references are removed from the hash
  1434. * table and vdev list (if the peer ref count is zero).
1435. * This protects against a new HL tx operation starting to use the
1436. * peer object just after this function has concluded that it is no longer in use.
  1437. * Furthermore, the lock needs to be held while checking whether the
  1438. * vdev's list of peers is empty, to make sure that list is not modified
  1439. * concurrently with the empty check.
  1440. */
  1441. cdf_spin_lock_bh(&pdev->peer_ref_mutex);
  1442. if (cdf_atomic_dec_and_test(&peer->ref_cnt)) {
  1443. u_int16_t peer_id;
  1444. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1445. "Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
  1446. peer,
  1447. peer->mac_addr.raw[0], peer->mac_addr.raw[1],
  1448. peer->mac_addr.raw[2], peer->mac_addr.raw[3],
  1449. peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
  1450. peer_id = peer->local_id;
  1451. /* remove the reference to the peer from the hash table */
  1452. ol_txrx_peer_find_hash_remove(pdev, peer);
  1453. /* remove the peer from its parent vdev's list */
  1454. TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
  1455. /* cleanup the Rx reorder queues for this peer */
  1456. ol_rx_peer_cleanup(vdev, peer);
  1457. /* peer is removed from peer_list */
  1458. cdf_atomic_set(&peer->delete_in_progress, 0);
  1459. /*
  1460. * Set wait_delete_comp event if the current peer id matches
  1461. * with registered peer id.
  1462. */
  1463. if (peer_id == vdev->wait_on_peer_id) {
  1464. cdf_event_set(&vdev->wait_delete_comp);
  1465. vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
  1466. }
  1467. /* check whether the parent vdev has no peers left */
  1468. if (TAILQ_EMPTY(&vdev->peer_list)) {
  1469. /*
  1470. * Check if the parent vdev was waiting for its peers
  1471. * to be deleted, in order for it to be deleted too.
  1472. */
  1473. if (vdev->delete.pending) {
  1474. ol_txrx_vdev_delete_cb vdev_delete_cb =
  1475. vdev->delete.callback;
  1476. void *vdev_delete_context =
  1477. vdev->delete.context;
  1478. /*
  1479. * Now that there are no references to the peer,
  1480. * we can release the peer reference lock.
  1481. */
  1482. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1483. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  1484. "%s: deleting vdev object %p "
  1485. "(%02x:%02x:%02x:%02x:%02x:%02x)"
  1486. " - its last peer is done\n",
  1487. __func__, vdev,
  1488. vdev->mac_addr.raw[0],
  1489. vdev->mac_addr.raw[1],
  1490. vdev->mac_addr.raw[2],
  1491. vdev->mac_addr.raw[3],
  1492. vdev->mac_addr.raw[4],
  1493. vdev->mac_addr.raw[5]);
  1494. /* all peers are gone, go ahead and delete it */
  1495. cdf_mem_free(vdev);
  1496. if (vdev_delete_cb)
  1497. vdev_delete_cb(vdev_delete_context);
  1498. } else {
  1499. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1500. }
  1501. } else {
  1502. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1503. }
  1504. /*
  1505. * 'array' is allocated in addba handler and is supposed to be
  1506. * freed in delba handler. There is the case (for example, in
  1507. * SSR) where delba handler is not called. Because array points
  1508. * to address of 'base' by default and is reallocated in addba
  1509. * handler later, only free the memory when the array does not
  1510. * point to base.
  1511. */
  1512. for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
  1513. if (peer->tids_rx_reorder[i].array !=
  1514. &peer->tids_rx_reorder[i].base) {
  1515. TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
  1516. "%s, delete reorder arr, tid:%d\n",
  1517. __func__, i);
  1518. cdf_mem_free(peer->tids_rx_reorder[i].array);
  1519. ol_rx_reorder_init(&peer->tids_rx_reorder[i],
  1520. (uint8_t) i);
  1521. }
  1522. }
  1523. cdf_mem_free(peer);
  1524. } else {
  1525. cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
  1526. }
  1527. }
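/**
 * ol_txrx_peer_detach() - begin tearing down a peer object
 * @peer: peer object to detach
 *
 * Redirects the peer's rx delivery to a discard function, frees its local
 * peer id, flushes any cached rx frames, clears last_real_peer references,
 * marks delete_in_progress, and drops the reference taken at attach time.
 * The peer itself is freed only after the PEER_UNMAP message releases the
 * remaining reference.
 *
 * Return: none
 */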
  1528. void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
  1529. {
  1530. struct ol_txrx_vdev_t *vdev = peer->vdev;
  1531. /* redirect peer's rx delivery function to point to a discard func */
  1532. peer->rx_opt_proc = ol_rx_discard;
  1533. peer->valid = 0;
  1534. ol_txrx_local_peer_id_free(peer->vdev->pdev, peer);
  1535. /* debug print to dump rx reorder state */
  1536. /* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
  1537. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1538. "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
  1539. __func__, peer,
  1540. peer->mac_addr.raw[0], peer->mac_addr.raw[1],
  1541. peer->mac_addr.raw[2], peer->mac_addr.raw[3],
  1542. peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
  1543. ol_txrx_flush_rx_frames(peer, 1);
  1544. if (peer->vdev->last_real_peer == peer)
  1545. peer->vdev->last_real_peer = NULL;
  1546. cdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
  1547. if (vdev->last_real_peer == peer)
  1548. vdev->last_real_peer = NULL;
  1549. cdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
  1550. htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
  1551. cdf_spinlock_destroy(&peer->peer_info_lock);
  1552. cdf_spinlock_destroy(&peer->bufq_lock);
1553. /* set delete_in_progress to indicate that wma
1554. * is waiting for the unmap message for this peer */
  1555. cdf_atomic_set(&peer->delete_in_progress, 1);
  1556. /*
  1557. * Remove the reference added during peer_attach.
  1558. * The peer will still be left allocated until the
  1559. * PEER_UNMAP message arrives to remove the other
  1560. * reference, added by the PEER_MAP message.
  1561. */
  1562. ol_txrx_peer_unref_delete(peer);
  1563. }
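/**
 * ol_txrx_peer_find_by_addr() - look up a peer by MAC address
 * @pdev: data physical device object
 * @peer_mac_addr: MAC address to search for
 *
 * The reference taken by the hash lookup is released before returning, so
 * the returned pointer is not protected against concurrent deletion.
 *
 * Return: peer object, or NULL if no matching peer exists
 */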
  1564. ol_txrx_peer_handle
  1565. ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
  1566. {
  1567. struct ol_txrx_peer_t *peer;
  1568. peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
  1569. if (peer) {
  1570. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1571. "%s: Delete extra reference %p\n", __func__, peer);
  1572. /* release the extra reference */
  1573. ol_txrx_peer_unref_delete(peer);
  1574. }
  1575. return peer;
  1576. }
  1577. /**
  1578. * ol_txrx_dump_tx_desc() - dump tx desc total and free count
1579. * @pdev_handle: Pointer to txrx pdev
  1580. *
  1581. * Return: none
  1582. */
  1583. static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
  1584. {
  1585. struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
  1586. uint32_t total;
  1587. total = ol_tx_get_desc_global_pool_size(pdev);
  1588. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1589. "total tx credit %d num_free %d",
  1590. total, pdev->tx_desc.num_free);
  1591. return;
  1592. }
  1593. /**
  1594. * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
  1595. * @timeout: timeout in ms
  1596. *
  1597. * Wait for tx queue to be empty, return timeout error if
  1598. * queue doesn't empty before timeout occurs.
  1599. *
  1600. * Return:
  1601. * CDF_STATUS_SUCCESS if the queue empties,
  1602. * CDF_STATUS_E_TIMEOUT in case of timeout,
  1603. * CDF_STATUS_E_FAULT in case of missing handle
  1604. */
  1605. CDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
  1606. {
  1607. ol_txrx_pdev_handle txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  1608. if (txrx_pdev == NULL) {
  1609. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1610. "%s: txrx context is null", __func__);
  1611. return CDF_STATUS_E_FAULT;
  1612. }
  1613. while (ol_txrx_get_tx_pending(txrx_pdev)) {
  1614. cdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
  1615. if (timeout <= 0) {
  1616. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
  1617. "%s: tx frames are pending", __func__);
  1618. ol_txrx_dump_tx_desc(txrx_pdev);
  1619. return CDF_STATUS_E_TIMEOUT;
  1620. }
  1621. timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
  1622. }
  1623. return CDF_STATUS_SUCCESS;
  1624. }
  1625. #ifndef QCA_WIFI_3_0_EMU
  1626. #define SUSPEND_DRAIN_WAIT 500
  1627. #else
  1628. #define SUSPEND_DRAIN_WAIT 3000
  1629. #endif
  1630. /**
  1631. * ol_txrx_bus_suspend() - bus suspend
  1632. *
  1633. * Ensure that ol_txrx is ready for bus suspend
  1634. *
  1635. * Return: CDF_STATUS
  1636. */
  1637. CDF_STATUS ol_txrx_bus_suspend(void)
  1638. {
  1639. return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
  1640. }
  1641. /**
  1642. * ol_txrx_bus_resume() - bus resume
  1643. *
1644. * Dummy function for symmetry
  1645. *
  1646. * Return: CDF_STATUS_SUCCESS
  1647. */
  1648. CDF_STATUS ol_txrx_bus_resume(void)
  1649. {
  1650. return CDF_STATUS_SUCCESS;
  1651. }
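/**
 * ol_txrx_get_tx_pending() - number of tx descriptors currently in use
 * @pdev_handle: txrx pdev handle
 *
 * Return: global descriptor pool size minus the number of free descriptors
 */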
  1652. int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev_handle)
  1653. {
  1654. struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
  1655. uint32_t total;
  1656. total = ol_tx_get_desc_global_pool_size(pdev);
  1657. return total - ol_tx_get_total_free_desc(pdev);
  1658. }
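/**
 * ol_txrx_discard_tx_pending() - discard all pending tx frames
 * @pdev_handle: txrx pdev handle
 *
 * Asks HTT to discard pending downloads, discards the host-side tx queues,
 * frees the discarded frames with error status, and drops frames already
 * handed to the target.
 *
 * Return: none
 */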
  1659. void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
  1660. {
  1661. ol_tx_desc_list tx_descs;
  1662. /* First let hif do the cdf_atomic_dec_and_test(&tx_desc->ref_cnt)
  1663. * then let htt do the cdf_atomic_dec_and_test(&tx_desc->ref_cnt)
1664. * which is the same as the normal data send completion path */
  1665. htt_tx_pending_discard(pdev_handle->htt_pdev);
  1666. TAILQ_INIT(&tx_descs);
  1667. ol_tx_queue_discard(pdev_handle, true, &tx_descs);
  1668. /* Discard Frames in Discard List */
  1669. ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
  1670. ol_tx_discard_target_frms(pdev_handle);
  1671. }
  1672. /*--- debug features --------------------------------------------------------*/
  1673. unsigned g_txrx_print_level = TXRX_PRINT_LEVEL_ERR; /* default */
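/**
 * ol_txrx_print_level_set() - set the runtime TXRX print verbosity
 * @level: new TXRX_PRINT_LEVEL_* value
 *
 * Has no effect unless the driver is compiled with TXRX_PRINT_ENABLE.
 *
 * Return: none
 */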
  1674. void ol_txrx_print_level_set(unsigned level)
  1675. {
  1676. #ifndef TXRX_PRINT_ENABLE
  1677. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
  1678. "The driver is compiled without TXRX prints enabled.\n"
  1679. "To enable them, recompile with TXRX_PRINT_ENABLE defined");
  1680. #else
  1681. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
  1682. "TXRX printout level changed from %d to %d",
  1683. g_txrx_print_level, level);
  1684. g_txrx_print_level = level;
  1685. #endif
  1686. }
  1687. struct ol_txrx_stats_req_internal {
  1688. struct ol_txrx_stats_req base;
  1689. int serviced; /* state of this request */
  1690. int offset;
  1691. };
  1692. static inline
  1693. uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
  1694. {
  1695. return (uint64_t) ((size_t) req);
  1696. }
  1697. static inline
  1698. struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
  1699. {
  1700. return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
  1701. }
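/*
 * Illustration (not part of the driver API): the stats request pointer is
 * round-tripped through the 64-bit HTT cookie, e.g.
 *
 *   cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);  -- sent to target
 *   ...
 *   req = ol_txrx_u64_to_stats_ptr(cookie);               -- in the handler
 */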
  1702. #ifdef ATH_PERF_PWR_OFFLOAD
  1703. void
  1704. ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
  1705. uint8_t cfg_stats_type, uint32_t cfg_val)
  1706. {
  1707. uint64_t dummy_cookie = 0;
  1708. htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
  1709. 0 /* reset mask */,
  1710. cfg_stats_type, cfg_val, dummy_cookie);
  1711. }
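/**
 * ol_txrx_fw_stats_get() - request debug stats from the firmware
 * @vdev: virtual device to issue the request on
 * @req: caller's stats request specification (may live on the stack)
 * @response_expected: whether a response handler will consume the request
 *
 * Copies the caller's request into a heap-allocated object whose address is
 * used as the HTT cookie, issues the HTT stats request, and optionally
 * blocks on the request's semaphore until the response arrives.
 *
 * Return: A_OK on success, A_NO_MEMORY or A_ERROR on failure
 */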
  1712. A_STATUS
  1713. ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
  1714. bool response_expected)
  1715. {
  1716. struct ol_txrx_pdev_t *pdev = vdev->pdev;
  1717. uint64_t cookie;
  1718. struct ol_txrx_stats_req_internal *non_volatile_req;
  1719. if (!pdev ||
  1720. req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
  1721. req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
  1722. return A_ERROR;
  1723. }
  1724. /*
  1725. * Allocate a non-transient stats request object.
  1726. * (The one provided as an argument is likely allocated on the stack.)
  1727. */
  1728. non_volatile_req = cdf_mem_malloc(sizeof(*non_volatile_req));
  1729. if (!non_volatile_req)
  1730. return A_NO_MEMORY;
  1731. /* copy the caller's specifications */
  1732. non_volatile_req->base = *req;
  1733. non_volatile_req->serviced = 0;
  1734. non_volatile_req->offset = 0;
  1735. /* use the non-volatile request object's address as the cookie */
  1736. cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
  1737. if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
  1738. req->stats_type_upload_mask,
  1739. req->stats_type_reset_mask,
  1740. HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
  1741. cookie)) {
  1742. cdf_mem_free(non_volatile_req);
  1743. return A_ERROR;
  1744. }
  1745. if (req->wait.blocking)
  1746. while (cdf_semaphore_acquire(pdev->osdev, req->wait.sem_ptr))
  1747. ;
  1748. if (response_expected == false)
  1749. cdf_mem_free(non_volatile_req);
  1750. return A_OK;
  1751. }
  1752. #endif
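/**
 * ol_txrx_fw_stats_handler() - process a firmware stats response
 * @pdev: data physical device object
 * @cookie: cookie carrying the original request pointer
 * @stats_info_list: T2H stats records to parse
 *
 * Walks the stats records until the series-done marker, copying each
 * recognized record into the requestor's buffer (bounded by the byte limit)
 * and invoking the request callback.  When no partial records remain, the
 * blocking semaphore (if any) is released and the request object is freed.
 *
 * Return: none
 */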
  1753. void
  1754. ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
  1755. uint64_t cookie, uint8_t *stats_info_list)
  1756. {
  1757. enum htt_dbg_stats_type type;
  1758. enum htt_dbg_stats_status status;
  1759. int length;
  1760. uint8_t *stats_data;
  1761. struct ol_txrx_stats_req_internal *req;
  1762. int more = 0;
  1763. req = ol_txrx_u64_to_stats_ptr(cookie);
  1764. do {
  1765. htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
  1766. &length, &stats_data);
  1767. if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
  1768. break;
  1769. if (status == HTT_DBG_STATS_STATUS_PRESENT ||
  1770. status == HTT_DBG_STATS_STATUS_PARTIAL) {
  1771. uint8_t *buf;
  1772. int bytes = 0;
  1773. if (status == HTT_DBG_STATS_STATUS_PARTIAL)
  1774. more = 1;
  1775. if (req->base.print.verbose || req->base.print.concise)
  1776. /* provide the header along with the data */
  1777. htt_t2h_stats_print(stats_info_list,
  1778. req->base.print.concise);
  1779. switch (type) {
  1780. case HTT_DBG_STATS_WAL_PDEV_TXRX:
  1781. bytes = sizeof(struct wlan_dbg_stats);
  1782. if (req->base.copy.buf) {
  1783. int lmt;
  1784. lmt = sizeof(struct wlan_dbg_stats);
  1785. if (req->base.copy.byte_limit < lmt)
  1786. lmt = req->base.copy.byte_limit;
  1787. buf = req->base.copy.buf + req->offset;
  1788. cdf_mem_copy(buf, stats_data, lmt);
  1789. }
  1790. break;
  1791. case HTT_DBG_STATS_RX_REORDER:
  1792. bytes = sizeof(struct rx_reorder_stats);
  1793. if (req->base.copy.buf) {
  1794. int lmt;
  1795. lmt = sizeof(struct rx_reorder_stats);
  1796. if (req->base.copy.byte_limit < lmt)
  1797. lmt = req->base.copy.byte_limit;
  1798. buf = req->base.copy.buf + req->offset;
  1799. cdf_mem_copy(buf, stats_data, lmt);
  1800. }
  1801. break;
  1802. case HTT_DBG_STATS_RX_RATE_INFO:
  1803. bytes = sizeof(wlan_dbg_rx_rate_info_t);
  1804. if (req->base.copy.buf) {
  1805. int lmt;
  1806. lmt = sizeof(wlan_dbg_rx_rate_info_t);
  1807. if (req->base.copy.byte_limit < lmt)
  1808. lmt = req->base.copy.byte_limit;
  1809. buf = req->base.copy.buf + req->offset;
  1810. cdf_mem_copy(buf, stats_data, lmt);
  1811. }
  1812. break;
  1813. case HTT_DBG_STATS_TX_RATE_INFO:
  1814. bytes = sizeof(wlan_dbg_tx_rate_info_t);
  1815. if (req->base.copy.buf) {
  1816. int lmt;
  1817. lmt = sizeof(wlan_dbg_tx_rate_info_t);
  1818. if (req->base.copy.byte_limit < lmt)
  1819. lmt = req->base.copy.byte_limit;
  1820. buf = req->base.copy.buf + req->offset;
  1821. cdf_mem_copy(buf, stats_data, lmt);
  1822. }
  1823. break;
  1824. case HTT_DBG_STATS_TX_PPDU_LOG:
  1825. bytes = 0;
  1826. /* TO DO: specify how many bytes are present */
  1827. /* TO DO: add copying to the requestor's buf */
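/* NB: no break here - control falls through to the next case */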
  1828. case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
  1829. bytes = sizeof(struct rx_remote_buffer_mgmt_stats);
  1830. if (req->base.copy.buf) {
  1831. int limit;
  1832. limit = sizeof(struct rx_remote_buffer_mgmt_stats);
  1833. if (req->base.copy.byte_limit < limit) {
  1834. limit = req->base.copy.byte_limit;
  1835. }
  1836. buf = req->base.copy.buf + req->offset;
  1837. cdf_mem_copy(buf, stats_data, limit);
  1838. }
  1839. break;
  1840. case HTT_DBG_STATS_TXBF_INFO:
  1841. bytes = sizeof(struct wlan_dbg_txbf_data_stats);
  1842. if (req->base.copy.buf) {
  1843. int limit;
  1844. limit = sizeof(struct wlan_dbg_txbf_data_stats);
  1845. if (req->base.copy.byte_limit < limit)
  1846. limit = req->base.copy.byte_limit;
  1847. buf = req->base.copy.buf + req->offset;
  1848. cdf_mem_copy(buf, stats_data, limit);
  1849. }
  1850. break;
  1851. case HTT_DBG_STATS_SND_INFO:
  1852. bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
  1853. if (req->base.copy.buf) {
  1854. int limit;
  1855. limit = sizeof(struct wlan_dbg_txbf_snd_stats);
  1856. if (req->base.copy.byte_limit < limit)
  1857. limit = req->base.copy.byte_limit;
  1858. buf = req->base.copy.buf + req->offset;
  1859. cdf_mem_copy(buf, stats_data, limit);
  1860. }
  1861. break;
  1862. case HTT_DBG_STATS_TX_SELFGEN_INFO:
  1863. bytes = sizeof(struct wlan_dbg_tx_selfgen_stats);
  1864. if (req->base.copy.buf) {
  1865. int limit;
  1866. limit = sizeof(struct wlan_dbg_tx_selfgen_stats);
  1867. if (req->base.copy.byte_limit < limit)
  1868. limit = req->base.copy.byte_limit;
  1869. buf = req->base.copy.buf + req->offset;
  1870. cdf_mem_copy(buf, stats_data, limit);
  1871. }
  1872. break;
  1873. case HTT_DBG_STATS_ERROR_INFO:
  1874. bytes =
  1875. sizeof(struct wlan_dbg_wifi2_error_stats);
  1876. if (req->base.copy.buf) {
  1877. int limit;
  1878. limit =
  1879. sizeof(struct wlan_dbg_wifi2_error_stats);
  1880. if (req->base.copy.byte_limit < limit)
  1881. limit = req->base.copy.byte_limit;
  1882. buf = req->base.copy.buf + req->offset;
  1883. cdf_mem_copy(buf, stats_data, limit);
  1884. }
  1885. break;
  1886. case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
  1887. bytes =
  1888. sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
  1889. if (req->base.copy.buf) {
  1890. int limit;
  1891. limit = sizeof(struct
  1892. rx_txbf_musu_ndpa_pkts_stats);
  1893. if (req->base.copy.byte_limit < limit)
  1894. limit =
  1895. req->base.copy.byte_limit;
  1896. buf = req->base.copy.buf + req->offset;
  1897. cdf_mem_copy(buf, stats_data, limit);
  1898. }
  1899. break;
  1900. default:
  1901. break;
  1902. }
  1903. buf = req->base.copy.buf
  1904. ? req->base.copy.buf
  1905. : stats_data;
  1906. if (req->base.callback.fp)
  1907. req->base.callback.fp(req->base.callback.ctxt,
  1908. type, buf, bytes);
  1909. }
  1910. stats_info_list += length;
  1911. } while (1);
  1912. if (!more) {
  1913. if (req->base.wait.blocking)
  1914. cdf_semaphore_release(pdev->osdev,
  1915. req->base.wait.sem_ptr);
  1916. cdf_mem_free(req);
  1917. }
  1918. }
  1919. #ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
  1920. int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
  1921. {
  1922. if (debug_specs & TXRX_DBG_MASK_OBJS) {
  1923. #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
  1924. ol_txrx_pdev_display(vdev->pdev, 0);
  1925. #else
  1926. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
  1927. "The pdev,vdev,peer display functions are disabled.\n"
  1928. "To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
  1929. #endif
  1930. }
  1931. if (debug_specs & TXRX_DBG_MASK_STATS) {
  1932. ol_txrx_stats_display(vdev->pdev);
  1933. }
  1934. if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
  1935. #if defined(ENABLE_TXRX_PROT_ANALYZE)
  1936. ol_txrx_prot_ans_display(vdev->pdev);
  1937. #else
  1938. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
  1939. "txrx protocol analysis is disabled.\n"
  1940. "To enable it, recompile with "
  1941. "ENABLE_TXRX_PROT_ANALYZE defined");
  1942. #endif
  1943. }
  1944. if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
  1945. #if defined(ENABLE_RX_REORDER_TRACE)
  1946. ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
  1947. #else
  1948. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
  1949. "rx reorder seq num trace is disabled.\n"
  1950. "To enable it, recompile with "
  1951. "ENABLE_RX_REORDER_TRACE defined");
  1952. #endif
  1953. }
  1954. return 0;
  1955. }
  1956. #endif
  1957. int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
  1958. int max_subfrms_ampdu, int max_subfrms_amsdu)
  1959. {
  1960. return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
  1961. max_subfrms_ampdu, max_subfrms_amsdu);
  1962. }
  1963. #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
  1964. void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
  1965. {
  1966. struct ol_txrx_vdev_t *vdev;
  1967. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1968. "%*s%s:\n", indent, " ", "txrx pdev");
  1969. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1970. "%*spdev object: %p", indent + 4, " ", pdev);
  1971. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1972. "%*svdev list:", indent + 4, " ");
  1973. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1974. ol_txrx_vdev_display(vdev, indent + 8);
  1975. }
  1976. ol_txrx_peer_find_display(pdev, indent + 4);
  1977. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1978. "%*stx desc pool: %d elems @ %p", indent + 4, " ",
  1979. pdev->tx_desc.pool_size, pdev->tx_desc.array);
  1980. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW, " ");
  1981. htt_display(pdev->htt_pdev, indent);
  1982. }
  1983. void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
  1984. {
  1985. struct ol_txrx_peer_t *peer;
  1986. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1987. "%*stxrx vdev: %p\n", indent, " ", vdev);
  1988. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1989. "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
  1990. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1991. "%*sMAC addr: %d:%d:%d:%d:%d:%d",
  1992. indent + 4, " ",
  1993. vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
  1994. vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
  1995. vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
  1996. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  1997. "%*speer list:", indent + 4, " ");
  1998. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  1999. ol_txrx_peer_display(peer, indent + 8);
  2000. }
  2001. }
  2002. void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
  2003. {
  2004. int i;
  2005. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  2006. "%*stxrx peer: %p", indent, " ", peer);
  2007. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
  2008. if (peer->peer_ids[i] != HTT_INVALID_PEER) {
  2009. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
  2010. "%*sID: %d", indent + 4, " ",
  2011. peer->peer_ids[i]);
  2012. }
  2013. }
  2014. }
  2015. #endif /* TXRX_DEBUG_LEVEL */
  2016. #if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
  2017. void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
  2018. {
  2019. int msdu_idx;
  2020. int seg_idx;
  2021. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2022. "TSO pkts %lld, bytes %lld\n",
  2023. pdev->stats.pub.tx.tso.tso_pkts.pkts,
  2024. pdev->stats.pub.tx.tso.tso_pkts.bytes);
  2025. for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
  2026. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2027. "curr msdu idx: %d curr seg idx: %d num segs %d\n",
  2028. TXRX_STATS_TSO_MSDU_IDX(pdev),
  2029. TXRX_STATS_TSO_SEG_IDX(pdev),
  2030. TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx));
  2031. for (seg_idx = 0;
  2032. ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx)) &&
  2033. (seg_idx < NUM_MAX_TSO_SEGS));
  2034. seg_idx++) {
  2035. struct cdf_tso_seg_t tso_seg =
  2036. TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
  2037. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2038. "msdu idx: %d seg idx: %d\n",
  2039. msdu_idx, seg_idx);
  2040. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2041. "tso_enable: %d\n",
  2042. tso_seg.tso_flags.tso_enable);
  2043. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2044. "fin %d syn %d rst %d psh %d ack %d\n"
  2045. "urg %d ece %d cwr %d ns %d\n",
  2046. tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
  2047. tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
  2048. tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
  2049. tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
  2050. tso_seg.tso_flags.ns);
  2051. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2052. "tcp_seq_num: 0x%x ip_id: %d\n",
  2053. tso_seg.tso_flags.tcp_seq_num,
  2054. tso_seg.tso_flags.ip_id);
  2055. }
  2056. }
  2057. }
  2058. #endif
  2059. /**
  2060. * ol_txrx_stats() - update ol layer stats
  2061. * @vdev_id: vdev_id
  2062. * @buffer: pointer to buffer
  2063. * @buf_len: length of the buffer
  2064. *
  2065. * Return: length of string
  2066. */
  2067. int
  2068. ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len)
  2069. {
  2070. uint32_t len = 0;
  2071. ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
  2072. if (!vdev) {
  2073. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2074. "%s: vdev is NULL", __func__);
  2075. snprintf(buffer, buf_len, "vdev not found");
  2076. return len;
  2077. }
  2078. len = scnprintf(buffer, buf_len,
  2079. "\nTXRX stats:\n"
  2080. "\nllQueue State : %s"
  2081. "\n pause %u unpause %u"
  2082. "\n overflow %u"
  2083. "\nllQueue timer state : %s\n",
  2084. ((vdev->ll_pause.is_q_paused == false) ? "UNPAUSED" : "PAUSED"),
  2085. vdev->ll_pause.q_pause_cnt,
  2086. vdev->ll_pause.q_unpause_cnt,
  2087. vdev->ll_pause.q_overflow_cnt,
  2088. ((vdev->ll_pause.is_q_timer_on == false)
  2089. ? "NOT-RUNNING" : "RUNNING"));
  2090. return len;
  2091. }
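/**
 * ol_txrx_stats_display() - print accumulated pdev tx/rx statistics
 * @pdev: data physical device object
 *
 * Dumps tx delivery/drop counters, the tx-completion histogram, and rx
 * ppdu/mpdu/msdu and intra-BSS forwarding counters via CDF_TRACE.
 *
 * Return: none
 */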
  2092. void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
  2093. {
  2094. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR, "txrx stats:");
  2095. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2096. " tx: sent %lld msdus (%lld B), "
  2097. " rejected %lld (%lld B), dropped %lld (%lld B)",
  2098. pdev->stats.pub.tx.delivered.pkts,
  2099. pdev->stats.pub.tx.delivered.bytes,
  2100. pdev->stats.pub.tx.dropped.host_reject.pkts,
  2101. pdev->stats.pub.tx.dropped.host_reject.bytes,
  2102. pdev->stats.pub.tx.dropped.download_fail.pkts
  2103. + pdev->stats.pub.tx.dropped.target_discard.pkts
  2104. + pdev->stats.pub.tx.dropped.no_ack.pkts,
  2105. pdev->stats.pub.tx.dropped.download_fail.bytes
  2106. + pdev->stats.pub.tx.dropped.target_discard.bytes
  2107. + pdev->stats.pub.tx.dropped.no_ack.bytes);
  2108. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2109. " download fail: %lld (%lld B), "
  2110. "target discard: %lld (%lld B), "
  2111. "no ack: %lld (%lld B)",
  2112. pdev->stats.pub.tx.dropped.download_fail.pkts,
  2113. pdev->stats.pub.tx.dropped.download_fail.bytes,
  2114. pdev->stats.pub.tx.dropped.target_discard.pkts,
  2115. pdev->stats.pub.tx.dropped.target_discard.bytes,
  2116. pdev->stats.pub.tx.dropped.no_ack.pkts,
  2117. pdev->stats.pub.tx.dropped.no_ack.bytes);
  2118. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2119. "Tx completion per interrupt:\n"
  2120. "Single Packet %d\n"
  2121. " 2-10 Packets %d\n"
  2122. "11-20 Packets %d\n"
  2123. "21-30 Packets %d\n"
  2124. "31-40 Packets %d\n"
  2125. "41-50 Packets %d\n"
  2126. "51-60 Packets %d\n"
  2127. " 60+ Packets %d\n",
  2128. pdev->stats.pub.tx.comp_histogram.pkts_1,
  2129. pdev->stats.pub.tx.comp_histogram.pkts_2_10,
  2130. pdev->stats.pub.tx.comp_histogram.pkts_11_20,
  2131. pdev->stats.pub.tx.comp_histogram.pkts_21_30,
  2132. pdev->stats.pub.tx.comp_histogram.pkts_31_40,
  2133. pdev->stats.pub.tx.comp_histogram.pkts_41_50,
  2134. pdev->stats.pub.tx.comp_histogram.pkts_51_60,
  2135. pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
  2136. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2137. " rx: %lld ppdus, %lld mpdus, %lld msdus, %lld bytes, %lld errs",
  2138. pdev->stats.priv.rx.normal.ppdus,
  2139. pdev->stats.priv.rx.normal.mpdus,
  2140. pdev->stats.pub.rx.delivered.pkts,
  2141. pdev->stats.pub.rx.delivered.bytes,
  2142. pdev->stats.priv.rx.err.mpdu_bad);
  2143. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2144. " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
  2145. pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
  2146. pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
  2147. pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
  2148. }
  2149. void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
  2150. {
  2151. cdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
  2152. }
  2153. #if defined(ENABLE_TXRX_PROT_ANALYZE)
  2154. void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
  2155. {
  2156. ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
  2157. ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
  2158. }
  2159. #endif /* ENABLE_TXRX_PROT_ANALYZE */
  2160. #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
  2161. int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
  2162. {
  2163. return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
  2164. OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
  2165. }
  2166. #endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
  2167. #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
  2168. A_STATUS
  2169. ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
  2170. ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
  2171. {
  2172. cdf_assert(pdev && peer && stats);
  2173. cdf_spin_lock_bh(&pdev->peer_stat_mutex);
  2174. cdf_mem_copy(stats, &peer->stats, sizeof(*stats));
  2175. cdf_spin_unlock_bh(&pdev->peer_stat_mutex);
  2176. return A_OK;
  2177. }
  2178. #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
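/**
 * ol_vdev_rx_set_intrabss_fwd() - enable/disable intra-BSS rx forwarding
 * @vdev: virtual device object
 * @val: true to disable intra-BSS forwarding, false to allow it
 *
 * Return: none
 */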
  2179. void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val)
  2180. {
  2181. if (NULL == vdev)
  2182. return;
  2183. vdev->disable_intrabss_fwd = val;
  2184. }
  2185. #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
  2186. /**
  2187. * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
  2188. * @sta_id: sta_id
  2189. *
  2190. * Return: vdev handle
  2191. * NULL if not found.
  2192. */
  2193. static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
  2194. {
  2195. struct ol_txrx_peer_t *peer = NULL;
  2196. ol_txrx_pdev_handle pdev = NULL;
  2197. if (sta_id >= WLAN_MAX_STA_COUNT) {
  2198. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2199. "Invalid sta id passed");
  2200. return NULL;
  2201. }
  2202. pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  2203. if (!pdev) {
  2204. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2205. "PDEV not found for sta_id [%d]", sta_id);
  2206. return NULL;
  2207. }
  2208. peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
  2209. if (!peer) {
  2210. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2211. "PEER [%d] not found", sta_id);
  2212. return NULL;
  2213. }
  2214. return peer->vdev;
  2215. }
  2216. /**
  2217. * ol_txrx_register_tx_flow_control() - register tx flow control callback
  2218. * @vdev_id: vdev_id
  2219. * @flowControl: flow control callback
  2220. * @osif_fc_ctx: callback context
  2221. *
2222. * Return: 0 for success or error code
  2223. */
  2224. int ol_txrx_register_tx_flow_control (uint8_t vdev_id,
  2225. ol_txrx_tx_flow_control_fp flowControl,
  2226. void *osif_fc_ctx)
  2227. {
  2228. ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
  2229. if (NULL == vdev) {
  2230. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2231. "%s: Invalid vdev_id %d", __func__, vdev_id);
  2232. return -EINVAL;
  2233. }
  2234. cdf_spin_lock_bh(&vdev->flow_control_lock);
  2235. vdev->osif_flow_control_cb = flowControl;
  2236. vdev->osif_fc_ctx = osif_fc_ctx;
  2237. cdf_spin_unlock_bh(&vdev->flow_control_lock);
  2238. return 0;
  2239. }
  2240. /**
2241. * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control callback
  2242. * @vdev_id: vdev_id
  2243. *
  2244. * Return: 0 for success or error code
  2245. */
  2246. int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
  2247. {
  2248. ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
  2249. if (NULL == vdev) {
  2250. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2251. "%s: Invalid vdev_id", __func__);
  2252. return -EINVAL;
  2253. }
  2254. cdf_spin_lock_bh(&vdev->flow_control_lock);
  2255. vdev->osif_flow_control_cb = NULL;
  2256. vdev->osif_fc_ctx = NULL;
  2257. cdf_spin_unlock_bh(&vdev->flow_control_lock);
  2258. return 0;
  2259. }
  2260. /**
2261. * ol_txrx_get_tx_resource() - check whether free tx descriptors exceed the low watermark
  2262. * @sta_id: sta id
  2263. * @low_watermark: low watermark
  2264. * @high_watermark_offset: high watermark offset value
  2265. *
2266. * Return: false if free tx descriptors are below @low_watermark (and the OS tx queue is paused), true otherwise
  2267. */
  2268. bool
  2269. ol_txrx_get_tx_resource(uint8_t sta_id,
  2270. unsigned int low_watermark,
  2271. unsigned int high_watermark_offset)
  2272. {
  2273. ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
  2274. if (NULL == vdev) {
  2275. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2276. "%s: Invalid sta_id %d", __func__, sta_id);
2277. /* Return true so the caller does not conclude that resources
2278. * are below low_watermark.
2279. * sta_id validation will be done in ol_tx_send_data_frame,
2280. * and if the sta_id is not registered then the host will drop
2281. * the packet.
  2282. */
  2283. return true;
  2284. }
  2285. cdf_spin_lock_bh(&vdev->pdev->tx_mutex);
  2286. if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
  2287. vdev->tx_fl_lwm = (uint16_t) low_watermark;
  2288. vdev->tx_fl_hwm =
  2289. (uint16_t) (low_watermark + high_watermark_offset);
  2290. /* Not enough free resource, stop TX OS Q */
  2291. cdf_atomic_set(&vdev->os_q_paused, 1);
  2292. cdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
  2293. return false;
  2294. }
  2295. cdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
  2296. return true;
  2297. }
  2298. /**
  2299. * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
  2300. * @vdev_id: vdev id
  2301. * @pause_q_depth: pause queue depth
  2302. *
  2303. * Return: 0 for success or error code
  2304. */
  2305. int
  2306. ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
  2307. {
  2308. ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
  2309. if (NULL == vdev) {
  2310. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2311. "%s: Invalid vdev_id %d", __func__, vdev_id);
  2312. return -EINVAL;
  2313. }
  2314. cdf_spin_lock_bh(&vdev->ll_pause.mutex);
  2315. vdev->ll_pause.max_q_depth = pause_q_depth;
  2316. cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
  2317. return 0;
  2318. }
  2319. /**
  2320. * ol_txrx_flow_control_cb() - call osif flow control callback
  2321. * @vdev: vdev handle
  2322. * @tx_resume: tx resume flag
  2323. *
  2324. * Return: none
  2325. */
  2326. inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
  2327. bool tx_resume)
  2328. {
  2329. cdf_spin_lock_bh(&vdev->flow_control_lock);
  2330. if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
  2331. vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
  2332. cdf_spin_unlock_bh(&vdev->flow_control_lock);
  2333. return;
  2334. }
  2335. #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
  2336. #ifdef IPA_OFFLOAD
  2337. /**
  2338. * ol_txrx_ipa_uc_get_resource() - Client request resource information
  2339. * @pdev: handle to the HTT instance
  2340. * @ce_sr_base_paddr: copy engine source ring base physical address
  2341. * @ce_sr_ring_size: copy engine source ring size
  2342. * @ce_reg_paddr: copy engine register physical address
  2343. * @tx_comp_ring_base_paddr: tx comp ring base physical address
  2344. * @tx_comp_ring_size: tx comp ring size
  2345. * @tx_num_alloc_buffer: number of allocated tx buffer
  2346. * @rx_rdy_ring_base_paddr: rx ready ring base physical address
  2347. * @rx_rdy_ring_size: rx ready ring size
  2348. * @rx_proc_done_idx_paddr: rx process done index physical address
  2349. * @rx_proc_done_idx_vaddr: rx process done index virtual address
  2350. * @rx2_rdy_ring_base_paddr: rx done ring base physical address
  2351. * @rx2_rdy_ring_size: rx done ring size
2352. * @rx2_proc_done_idx2_paddr: rx done index physical address
2353. * @rx2_proc_done_idx2_vaddr: rx done index virtual address
  2354. *
2355. * The OL client requests IPA UC related resource information.
2356. * Resource information will be distributed to the IPA module.
2357. * All of the required resources should be pre-allocated.
  2358. *
  2359. * Return: none
  2360. */
  2361. void
  2362. ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
  2363. cdf_dma_addr_t *ce_sr_base_paddr,
  2364. uint32_t *ce_sr_ring_size,
  2365. cdf_dma_addr_t *ce_reg_paddr,
  2366. cdf_dma_addr_t *tx_comp_ring_base_paddr,
  2367. uint32_t *tx_comp_ring_size,
  2368. uint32_t *tx_num_alloc_buffer,
  2369. cdf_dma_addr_t *rx_rdy_ring_base_paddr,
  2370. uint32_t *rx_rdy_ring_size,
  2371. cdf_dma_addr_t *rx_proc_done_idx_paddr,
  2372. void **rx_proc_done_idx_vaddr,
  2373. cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
  2374. uint32_t *rx2_rdy_ring_size,
  2375. cdf_dma_addr_t *rx2_proc_done_idx2_paddr,
  2376. void **rx2_proc_done_idx2_vaddr)
  2377. {
  2378. htt_ipa_uc_get_resource(pdev->htt_pdev,
  2379. ce_sr_base_paddr,
  2380. ce_sr_ring_size,
  2381. ce_reg_paddr,
  2382. tx_comp_ring_base_paddr,
  2383. tx_comp_ring_size,
  2384. tx_num_alloc_buffer,
  2385. rx_rdy_ring_base_paddr,
  2386. rx_rdy_ring_size, rx_proc_done_idx_paddr,
  2387. rx_proc_done_idx_vaddr,
  2388. rx2_rdy_ring_base_paddr,
  2389. rx2_rdy_ring_size, rx2_proc_done_idx2_paddr,
  2390. rx2_proc_done_idx2_vaddr);
  2391. }
  2392. /**
  2393. * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
  2394. * @pdev: handle to the HTT instance
2395. * @ipa_tx_uc_doorbell_paddr: tx comp doorbell physical address
2396. * @ipa_rx_uc_doorbell_paddr: rx ready doorbell physical address
2397. *
2398. * Lets the firmware know the IPA UC doorbell register physical addresses;
2399. * WLAN firmware will use these physical addresses to notify the IPA UC.
  2400. *
  2401. * Return: none
  2402. */
  2403. void
  2404. ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
  2405. cdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
  2406. cdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
  2407. {
  2408. htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
  2409. ipa_tx_uc_doorbell_paddr,
  2410. ipa_rx_uc_doorbell_paddr);
  2411. }
  2412. /**
  2413. * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
  2414. * @pdev: handle to the HTT instance
2415. * @uc_active: whether the IPA UC data path is active
2416. * @is_tx: true when the setting applies to the tx path, false for rx
2417. *
2418. * Notifies the WLAN firmware, via the HTT layer, whether the IPA UC
2419. * data path is active.
  2420. *
  2421. * Return: none
  2422. */
  2423. void
  2424. ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx)
  2425. {
  2426. htt_h2t_ipa_uc_set_active(pdev->htt_pdev, uc_active, is_tx);
  2427. }
  2428. /**
  2429. * ol_txrx_ipa_uc_fw_op_event_handler() - opcode event handler
  2430. * @context: pdev context
  2431. * @rxpkt: received packet
  2432. * @staid: peer id
  2433. *
  2434. * Return: None
  2435. */
  2436. void ol_txrx_ipa_uc_fw_op_event_handler(void *context,
  2437. void *rxpkt,
  2438. uint16_t staid)
  2439. {
  2440. ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
  2441. if (cdf_unlikely(!pdev)) {
  2442. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2443. "%s: Invalid context", __func__);
  2444. cdf_mem_free(rxpkt);
  2445. return;
  2446. }
  2447. if (pdev->ipa_uc_op_cb) {
  2448. pdev->ipa_uc_op_cb(rxpkt, pdev->osif_dev);
  2449. } else {
  2450. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2451. "%s: ipa_uc_op_cb NULL", __func__);
  2452. cdf_mem_free(rxpkt);
  2453. }
  2454. }
  2455. #ifdef QCA_CONFIG_SMP
  2456. /**
  2457. * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
  2458. * @pdev: handle to the HTT instance
  2459. * @op_msg: op response message from firmware
  2460. *
  2461. * Return: none
  2462. */
  2463. void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
  2464. {
  2465. p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
  2466. struct cds_ol_rx_pkt *pkt;
  2467. if (cdf_unlikely(!sched_ctx))
  2468. return;
  2469. pkt = cds_alloc_ol_rx_pkt(sched_ctx);
  2470. if (cdf_unlikely(!pkt)) {
  2471. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2472. "%s: Not able to allocate context", __func__);
  2473. return;
  2474. }
  2475. pkt->callback = (cds_ol_rx_thread_cb) ol_txrx_ipa_uc_fw_op_event_handler;
  2476. pkt->context = pdev;
  2477. pkt->Rxpkt = (void *)op_msg;
  2478. pkt->staId = 0;
  2479. cds_indicate_rxpkt(sched_ctx, pkt);
  2480. }
  2481. #else
  2482. void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev,
  2483. uint8_t *op_msg)
  2484. {
  2485. if (pdev->ipa_uc_op_cb) {
  2486. pdev->ipa_uc_op_cb(op_msg, pdev->osif_dev);
  2487. } else {
  2488. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2489. "%s: IPA callback function is not registered", __func__);
  2490. cdf_mem_free(op_msg);
  2491. return;
  2492. }
  2493. }
  2494. #endif
  2495. /**
  2496. * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
  2497. * @pdev: handle to the HTT instance
  2498. * @op_cb: handler function pointer
  2499. * @osif_dev: register client context
  2500. *
  2501. * Return: none
  2502. */
  2503. void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
  2504. ipa_uc_op_cb_type op_cb, void *osif_dev)
  2505. {
  2506. pdev->ipa_uc_op_cb = op_cb;
  2507. pdev->osif_dev = osif_dev;
  2508. }
  2509. /**
  2510. * ol_txrx_ipa_uc_get_stat() - Get firmware wdi status
  2511. * @pdev: handle to the HTT instance
  2512. *
  2513. * Return: none
  2514. */
  2515. void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
  2516. {
  2517. htt_h2t_ipa_uc_get_stats(pdev->htt_pdev);
  2518. }
2519. #endif /* IPA_OFFLOAD */
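/**
 * ol_txrx_display_stats() - display txrx statistics of the requested type
 * @value: WLAN_TXRX_STATS, WLAN_TXRX_TSO_STATS, WLAN_DUMP_TX_FLOW_POOL_INFO,
 *         or WLAN_TXRX_DESC_STATS
 *
 * Return: none
 */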
  2520. void ol_txrx_display_stats(uint16_t value)
  2521. {
  2522. ol_txrx_pdev_handle pdev;
  2523. pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  2524. if (!pdev) {
  2525. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2526. "%s: pdev is NULL", __func__);
  2527. return;
  2528. }
  2529. switch (value) {
  2530. case WLAN_TXRX_STATS:
  2531. ol_txrx_stats_display(pdev);
  2532. break;
  2533. case WLAN_TXRX_TSO_STATS:
  2534. #if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
  2535. ol_txrx_stats_display_tso(pdev);
  2536. #else
  2537. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2538. "%s: TSO not supported", __func__);
  2539. #endif
  2540. break;
  2541. case WLAN_DUMP_TX_FLOW_POOL_INFO:
  2542. ol_tx_dump_flow_pool_info();
  2543. break;
  2544. case WLAN_TXRX_DESC_STATS:
  2545. cdf_nbuf_tx_desc_count_display();
  2546. break;
  2547. default:
  2548. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2549. "%s: Unknown value", __func__);
  2550. break;
  2551. }
  2552. }
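/**
 * ol_txrx_clear_stats() - clear txrx statistics of the requested type
 * @value: WLAN_TXRX_STATS, WLAN_DUMP_TX_FLOW_POOL_INFO, or
 *         WLAN_TXRX_DESC_STATS
 *
 * Return: none
 */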
  2553. void ol_txrx_clear_stats(uint16_t value)
  2554. {
  2555. ol_txrx_pdev_handle pdev;
  2556. pdev = cds_get_context(CDF_MODULE_ID_TXRX);
  2557. if (!pdev) {
  2558. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2559. "%s: pdev is NULL", __func__);
  2560. return;
  2561. }
  2562. switch (value) {
  2563. case WLAN_TXRX_STATS:
  2564. ol_txrx_stats_clear(pdev);
  2565. break;
  2566. case WLAN_DUMP_TX_FLOW_POOL_INFO:
  2567. ol_tx_clear_flow_pool_stats();
  2568. break;
  2569. case WLAN_TXRX_DESC_STATS:
  2570. cdf_nbuf_tx_desc_count_clear();
  2571. break;
  2572. default:
  2573. CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
  2574. "%s: Unknown value", __func__);
  2575. break;
  2576. }
  2577. }
  2578. /**
  2579. * ol_rx_data_cb() - data rx callback
  2580. * @peer: peer
  2581. * @buf_list: buffer list
  2582. *
  2583. * Return: None
  2584. */
  2585. static void ol_rx_data_cb(struct ol_txrx_peer_t *peer,
  2586. cdf_nbuf_t buf_list)
  2587. {
  2588. void *cds_ctx = cds_get_global_context();
  2589. cdf_nbuf_t buf, next_buf;
  2590. CDF_STATUS ret;
  2591. ol_rx_callback_fp data_rx = NULL;
  2592. if (cdf_unlikely(!cds_ctx))
  2593. goto free_buf;
  2594. cdf_spin_lock_bh(&peer->peer_info_lock);
  2595. if (cdf_unlikely(!(peer->state >= ol_txrx_peer_state_conn))) {
  2596. cdf_spin_unlock_bh(&peer->peer_info_lock);
  2597. goto free_buf;
  2598. }
  2599. data_rx = peer->osif_rx;
  2600. cdf_spin_unlock_bh(&peer->peer_info_lock);
  2601. cdf_spin_lock_bh(&peer->bufq_lock);
  2602. if (!list_empty(&peer->cached_bufq)) {
  2603. cdf_spin_unlock_bh(&peer->bufq_lock);
  2604. /* Flush the cached frames to HDD before passing new rx frame */
  2605. ol_txrx_flush_rx_frames(peer, 0);
  2606. } else
  2607. cdf_spin_unlock_bh(&peer->bufq_lock);
  2608. buf = buf_list;
  2609. while (buf) {
  2610. next_buf = cdf_nbuf_queue_next(buf);
  2611. cdf_nbuf_set_next(buf, NULL); /* Add NULL terminator */
  2612. ret = data_rx(cds_ctx, buf, peer->local_id);
  2613. if (ret != CDF_STATUS_SUCCESS) {
  2614. TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Frame Rx to HDD failed");
  2615. cdf_nbuf_free(buf);
  2616. }
  2617. buf = next_buf;
  2618. }
  2619. return;
  2620. free_buf:
  2621. TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%s:Dropping frames", __func__);
  2622. buf = buf_list;
  2623. while (buf) {
  2624. next_buf = cdf_nbuf_queue_next(buf);
  2625. cdf_nbuf_free(buf);
  2626. buf = next_buf;
  2627. }
  2628. }
  2629. /**
  2630. * ol_rx_data_process() - process rx frame
  2631. * @peer: peer
  2632. * @rx_buf_list: rx buffer list
  2633. *
  2634. * Return: None
  2635. */
  2636. void ol_rx_data_process(struct ol_txrx_peer_t *peer,
  2637. cdf_nbuf_t rx_buf_list)
  2638. {
2639. /* The firmware data-path-active response will be handled by the shim
2640. * RX thread; the T2H message arrives in SIRQ context, and IPA kernel
2641. * module APIs must not be called from SIRQ context. */
  2642. cdf_nbuf_t buf, next_buf;
	ol_rx_callback_fp data_rx = NULL;
	ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if ((!peer) || (!pdev)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "peer/pdev is NULL");
		goto drop_rx_buf;
	}

	cdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= ol_txrx_peer_state_conn)
		data_rx = peer->osif_rx;
	cdf_spin_unlock_bh(&peer->peer_info_lock);

	/*
	 * If there is a data frame from peer before the peer is
	 * registered for data service, enqueue them on to pending queue
	 * which will be flushed to HDD once that station is registered.
	 */
	if (!data_rx) {
		struct ol_rx_cached_buf *cache_buf;

		buf = rx_buf_list;
		while (buf) {
			next_buf = cdf_nbuf_queue_next(buf);
			cache_buf = cdf_mem_malloc(sizeof(*cache_buf));
			if (!cache_buf) {
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					"Failed to allocate buf to cache the rx frames");
				cdf_nbuf_free(buf);
			} else {
				/* Add NULL terminator */
				cdf_nbuf_set_next(buf, NULL);
				cache_buf->buf = buf;
				cdf_spin_lock_bh(&peer->bufq_lock);
				list_add_tail(&cache_buf->list,
					      &peer->cached_bufq);
				cdf_spin_unlock_bh(&peer->bufq_lock);
			}
			buf = next_buf;
		}
	} else {
#ifdef QCA_CONFIG_SMP
		/*
		 * If the kernel is SMP, schedule rx thread to
		 * better use multicores.
		 */
		if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
			ol_rx_data_cb(peer, rx_buf_list);
		} else {
			p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
			struct cds_ol_rx_pkt *pkt;

			if (unlikely(!sched_ctx))
				goto drop_rx_buf;

			pkt = cds_alloc_ol_rx_pkt(sched_ctx);
			if (!pkt) {
				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
					"No available Rx message buffer");
				goto drop_rx_buf;
			}
			pkt->callback = (cds_ol_rx_thread_cb)ol_rx_data_cb;
			pkt->context = (void *)peer;
			pkt->Rxpkt = (void *)rx_buf_list;
			pkt->staId = peer->local_id;
			cds_indicate_rxpkt(sched_ctx, pkt);
		}
#else /* QCA_CONFIG_SMP */
		ol_rx_data_cb(peer, rx_buf_list);
#endif /* QCA_CONFIG_SMP */
	}
	return;

drop_rx_buf:
	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Dropping rx packets");
	buf = rx_buf_list;
	while (buf) {
		next_buf = cdf_nbuf_queue_next(buf);
		cdf_nbuf_free(buf);
		buf = next_buf;
	}
}
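/*
 * Illustrative sketch (not part of the driver): ol_rx_data_process() expects
 * rx_buf_list to be a singly linked cdf_nbuf chain terminated by NULL and
 * linked with cdf_nbuf_set_next(). A caller that has collected two frames
 * (frame_a, frame_b, names hypothetical) could hand them off like this:
 *
 *	cdf_nbuf_set_next(frame_a, frame_b);
 *	cdf_nbuf_set_next(frame_b, NULL);
 *	ol_rx_data_process(peer, frame_a);
 *
 * If the peer is not yet registered (peer->osif_rx == NULL), the frames are
 * cached on peer->cached_bufq and delivered when ol_txrx_register_peer()
 * later flushes the queue.
 */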
/**
 * ol_txrx_register_peer() - register peer
 * @rxcb: rx callback used to deliver frames to the OS shim
 * @sta_desc: sta descriptor
 *
 * Return: CDF Status
 */
CDF_STATUS ol_txrx_register_peer(ol_rx_callback_fp rxcb,
				 struct ol_txrx_desc_type *sta_desc)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	union ol_txrx_peer_update_param_t param;
	struct privacy_exemption privacy_filter;

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
		return CDF_STATUS_E_INVAL;
	}

	if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id: %d",
			   sta_desc->sta_id);
		return CDF_STATUS_E_INVAL;
	}

	peer = ol_txrx_peer_find_by_local_id(pdev, sta_desc->sta_id);
	if (!peer)
		return CDF_STATUS_E_FAULT;

	cdf_spin_lock_bh(&peer->peer_info_lock);
	peer->osif_rx = rxcb;
	peer->state = ol_txrx_peer_state_conn;
	cdf_spin_unlock_bh(&peer->peer_info_lock);

	param.qos_capable = sta_desc->is_qos_enabled;
	ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
			    ol_txrx_peer_update_qos_capable);

	if (sta_desc->is_wapi_supported) {
		/* Privacy filter to accept unencrypted WAI frames */
		privacy_filter.ether_type = ETHERTYPE_WAI;
		privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
		privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
		ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
	}

	ol_txrx_flush_rx_frames(peer, 0);
	return CDF_STATUS_SUCCESS;
}
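/*
 * Illustrative sketch (not part of the driver): a typical caller (e.g. the
 * HDD layer) fills in a station descriptor and hands in its rx callback once
 * the peer exists in the peer table. my_osif_rx is the hypothetical handler
 * from the earlier sketch and assigned_sta_id a hypothetical local id; error
 * handling is elided.
 *
 *	struct ol_txrx_desc_type sta_desc = {0};
 *
 *	sta_desc.sta_id = assigned_sta_id;
 *	sta_desc.is_qos_enabled = 1;
 *	sta_desc.is_wapi_supported = 0;
 *	if (ol_txrx_register_peer(my_osif_rx, &sta_desc) != CDF_STATUS_SUCCESS)
 *		;	// handle registration failure
 *
 * On disconnect, ol_txrx_clear_peer(assigned_sta_id) below undoes this:
 * pending and cached rx frames are dropped and the rx callback is cleared.
 */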
/**
 * ol_txrx_clear_peer() - clear peer
 * @sta_id: sta id
 *
 * Return: CDF Status
 */
CDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
			   __func__);
		return CDF_STATUS_E_FAILURE;
	}

	if (sta_id >= WLAN_MAX_STA_COUNT) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id %d", sta_id);
		return CDF_STATUS_E_INVAL;
	}

#ifdef QCA_CONFIG_SMP
	{
		p_cds_sched_context sched_ctx = get_cds_sched_ctxt();

		/* Drop pending Rx frames in CDS */
		if (sched_ctx)
			cds_drop_rxpkt_by_staid(sched_ctx, sta_id);
	}
#endif

	peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
	if (!peer)
		return CDF_STATUS_E_FAULT;

	/* Purge the cached rx frame queue */
	ol_txrx_flush_rx_frames(peer, 1);

	cdf_spin_lock_bh(&peer->peer_info_lock);
	peer->osif_rx = NULL;
	peer->state = ol_txrx_peer_state_disc;
	cdf_spin_unlock_bh(&peer->peer_info_lock);

	return CDF_STATUS_SUCCESS;
}
/**
 * ol_txrx_register_ocb_peer - Function to register the OCB peer
 * @cds_ctx: Pointer to the global OS context
 * @mac_addr: MAC address of the self peer
 * @peer_id: Pointer to the peer ID
 *
 * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAILURE on failure
 */
CDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
				     uint8_t *peer_id)
{
	ol_txrx_pdev_handle pdev;
	ol_txrx_peer_handle peer;

	if (!cds_ctx) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Invalid context",
			   __func__);
		return CDF_STATUS_E_FAILURE;
	}

	pdev = cds_get_context(CDF_MODULE_ID_TXRX);
	if (!pdev) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
			   __func__);
		return CDF_STATUS_E_FAILURE;
	}

	peer = ol_txrx_find_peer_by_addr(pdev, mac_addr, peer_id);
	if (!peer) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find OCB peer!",
			   __func__);
		return CDF_STATUS_E_FAILURE;
	}

	ol_txrx_set_ocb_peer(pdev, peer);

	/* Set peer state to authenticated */
	ol_txrx_peer_state_update(pdev, peer->mac_addr.raw,
				  ol_txrx_peer_state_auth);

	return CDF_STATUS_SUCCESS;
}
/**
 * ol_txrx_set_ocb_peer - Function to store the OCB peer
 * @pdev: Handle to the HTT instance
 * @peer: Pointer to the peer
 *
 * Return: None
 */
void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
			  struct ol_txrx_peer_t *peer)
{
	if (pdev == NULL)
		return;

	pdev->ocb_peer = peer;
	pdev->ocb_peer_valid = (NULL != peer);
}
/**
 * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
 * @pdev: Handle to the HTT instance
 * @peer: Pointer to the returned peer
 *
 * Return: true if the peer is valid, false if not
 */
bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
			  struct ol_txrx_peer_t **peer)
{
	bool rc;

	if ((pdev == NULL) || (peer == NULL)) {
		rc = false;
		goto exit;
	}

	if (pdev->ocb_peer_valid) {
		*peer = pdev->ocb_peer;
		rc = true;
	} else {
		rc = false;
	}

exit:
	return rc;
}
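/*
 * Illustrative sketch (not part of the driver): the set/get pair above is
 * typically used to remember the single self peer for OCB mode and to look
 * it up later, e.g. in an rx-path check. Variable names are hypothetical.
 *
 *	struct ol_txrx_peer_t *ocb_peer;
 *
 *	if (ol_txrx_get_ocb_peer(pdev, &ocb_peer)) {
 *		// ocb_peer is valid; compare it against the rx peer, etc.
 *	}
 *
 * Passing a NULL peer to ol_txrx_set_ocb_peer(pdev, NULL) marks the stored
 * OCB peer as invalid again (ocb_peer_valid becomes false).
 */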
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_txrx_register_pause_cb() - register pause callback
 * @pause_cb: pause callback
 *
 * Return: CDF status
 */
CDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
{
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	if (!pdev || !pause_cb) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "pdev or pause_cb is NULL");
		return CDF_STATUS_E_INVAL;
	}

	pdev->pause_cb = pause_cb;
	return CDF_STATUS_SUCCESS;
}
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
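/*
 * Illustrative sketch (not part of the driver): with QCA_LL_TX_FLOW_CONTROL_V2
 * enabled, a higher layer registers a single pause callback that the tx flow
 * pools invoke to pause or resume the OS netif queues. The callback signature
 * shown here is an assumption for illustration only; the authoritative
 * prototype is the ol_tx_pause_callback_fp typedef.
 *
 *	static void my_txrx_pause_cb(uint8_t vdev_id,
 *				     enum netif_action_type action,
 *				     enum netif_reason_type reason)
 *	{
 *		// pause or wake the OS tx queues for vdev_id here
 *	}
 *
 *	if (ol_txrx_register_pause_cb(my_txrx_pause_cb) != CDF_STATUS_SUCCESS)
 *		;	// flow control notifications will not fire
 */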
#if defined(FEATURE_LRO)
/**
 * ol_txrx_lro_flush_handler() - LRO flush handler
 * @context: dev handle
 * @rxpkt: rx data
 * @staid: station id
 *
 * This function handles an LRO flush indication.
 * If the rx thread is enabled, it is invoked from the rx thread;
 * otherwise it is called in tasklet context.
 *
 * Return: none
 */
void ol_txrx_lro_flush_handler(void *context,
			       void *rxpkt,
			       uint16_t staid)
{
	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;

	if (cdf_unlikely(!pdev)) {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid context", __func__);
		cdf_assert(0);
		return;
	}

	if (pdev->lro_info.lro_flush_cb)
		pdev->lro_info.lro_flush_cb(pdev->lro_info.lro_data);
	else
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
			  "%s: lro_flush_cb NULL", __func__);
}
/**
 * ol_txrx_lro_flush() - LRO flush callback
 * @data: opaque data pointer
 *
 * This is the callback registered with CE to trigger
 * an LRO flush
 *
 * Return: none
 */
void ol_txrx_lro_flush(void *data)
{
	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
	struct cds_ol_rx_pkt *pkt;
	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)data;

	if (cdf_unlikely(!sched_ctx))
		return;

	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
		ol_txrx_lro_flush_handler((void *)pdev, NULL, 0);
	} else {
		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
		if (cdf_unlikely(!pkt)) {
			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
				  "%s: Not able to allocate context", __func__);
			return;
		}

		pkt->callback = (cds_ol_rx_thread_cb) ol_txrx_lro_flush_handler;
		pkt->context = pdev;
		pkt->Rxpkt = NULL;
		pkt->staId = 0;
		cds_indicate_rxpkt(sched_ctx, pkt);
	}
}
/**
 * ol_register_lro_flush_cb() - register the LRO flush callback
 * @handler: callback function
 * @data: opaque data pointer to be passed back
 *
 * Store the LRO flush callback provided and in turn
 * register OL's LRO flush handler with CE
 *
 * Return: none
 */
void ol_register_lro_flush_cb(void (handler)(void *), void *data)
{
	struct ol_softc *hif_device =
		(struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	pdev->lro_info.lro_flush_cb = handler;
	pdev->lro_info.lro_data = data;
	ce_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, pdev);
}

/**
 * ol_deregister_lro_flush_cb() - deregister the LRO flush
 * callback
 *
 * Remove the LRO flush callback provided and in turn
 * deregister OL's LRO flush handler with CE
 *
 * Return: none
 */
void ol_deregister_lro_flush_cb(void)
{
	struct ol_softc *hif_device =
		(struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);

	ce_lro_flush_cb_deregister(hif_device);
	pdev->lro_info.lro_flush_cb = NULL;
	pdev->lro_info.lro_data = NULL;
}
#endif /* FEATURE_LRO */
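/*
 * Illustrative sketch (not part of the driver): under FEATURE_LRO, the LRO
 * manager hands its flush routine and context to the datapath at init and
 * removes them at teardown. my_lro_flush and my_lro_ctx are hypothetical
 * names used only for illustration.
 *
 *	static void my_lro_flush(void *ctx)
 *	{
 *		// flush any aggregated LRO streams held in ctx
 *	}
 *
 *	ol_register_lro_flush_cb(my_lro_flush, my_lro_ctx);
 *	...
 *	ol_deregister_lro_flush_cb();
 *
 * Between these calls, every CE-triggered ol_txrx_lro_flush() ends up in
 * ol_txrx_lro_flush_handler(), which invokes my_lro_flush(my_lro_ctx) either
 * inline (tasklet context) or from the rx thread, depending on whether the
 * rx thread is enabled.
 */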