dp_main.c

  1. /*
  2. * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <qdf_types.h>
  19. #include <qdf_lock.h>
  20. #include <qdf_net_types.h>
  21. #include <hal_api.h>
  22. #include <hif.h>
  23. #include <htt.h>
  24. #include <wdi_event.h>
  25. #include <queue.h>
  26. #include "dp_htt.h"
  27. #include "dp_types.h"
  28. #include "dp_internal.h"
  29. #include "dp_tx.h"
  30. #include "dp_rx.h"
  31. #include <cdp_txrx_handle.h>
  32. #include <wlan_cfg.h>
  33. #include "cdp_txrx_cmn_struct.h"
  34. #include <qdf_util.h>
  35. #include "dp_peer.h"
  36. #include "dp_rx_mon.h"
  37. #define DP_INTR_POLL_TIMER_MS 10
  38. #define DP_MCS_LENGTH (6*MAX_MCS)
  39. #define DP_NSS_LENGTH (6*SS_COUNT)
  40. #define DP_RXDMA_ERR_LENGTH (6*MAX_RXDMA_ERRORS)
  41. #define DP_REO_ERR_LENGTH (6*REO_ERROR_TYPE_MAX)
  42. /**
  43. * default_dscp_tid_map - Default DSCP-TID mapping
  44. *
  45. * DSCP TID AC
  46. * 000000 0 WME_AC_BE
  47. * 001000 1 WME_AC_BK
  48. * 010000 1 WME_AC_BK
  49. * 011000 0 WME_AC_BE
  50. * 100000 5 WME_AC_VI
  51. * 101000 5 WME_AC_VI
  52. * 110000 6 WME_AC_VO
  53. * 111000 6 WME_AC_VO
  54. */
  55. static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
  56. 0, 0, 0, 0, 0, 0, 0, 0,
  57. 1, 1, 1, 1, 1, 1, 1, 1,
  58. 1, 1, 1, 1, 1, 1, 1, 1,
  59. 0, 0, 0, 0, 0, 0, 0, 0,
  60. 5, 5, 5, 5, 5, 5, 5, 5,
  61. 5, 5, 5, 5, 5, 5, 5, 5,
  62. 6, 6, 6, 6, 6, 6, 6, 6,
  63. 6, 6, 6, 6, 6, 6, 6, 6,
  64. };
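/*
 * The table is indexed by the 6-bit DSCP value (0..63); DSCP_TID_MAP_MAX is
 * expected to cover that range. dp_dscp_tid_map_setup() below copies this
 * default map into each pdev and programs it to HW through
 * hal_tx_set_dscp_tid_map().
 */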
  65. /**
  66. * @brief Select the type of statistics
  67. */
  68. enum dp_stats_type {
  69. STATS_FW = 0,
  70. STATS_HOST = 1,
  71. STATS_TYPE_MAX = 2,
  72. };
  73. /**
  74. * @brief General Firmware statistics options
  75. *
  76. */
  77. enum dp_fw_stats {
  78. TXRX_FW_STATS_INVALID = -1,
  79. };
  80. /**
  81. * @brief Firmware and Host statistics
  82. * currently supported
  83. */
  84. const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
  85. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  86. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  87. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  88. {TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
  89. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  90. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  91. {TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
  92. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  93. {TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
  94. {TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
  95. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  96. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  97. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  98. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  99. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  100. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  101. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  102. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  103. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  104. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  105. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  106. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  107. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  108. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  109. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  110. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  111. {TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
  112. {TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
  113. };
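/*
 * Rows of this table are expected to be indexed by the stats request value
 * passed down from the control path; column 0 (STATS_FW) holds the matching
 * firmware stats id and column 1 (STATS_HOST) the host stats id, with the
 * TXRX_*_STATS_INVALID entries marking unsupported combinations.
 */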
  114. /**
* dp_srng_setup() - Internal function to set up SRNG rings used by the data path
  116. */
  117. static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
  118. int ring_type, int ring_num, int mac_id, uint32_t num_entries)
  119. {
  120. void *hal_soc = soc->hal_soc;
  121. uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
  122. /* TODO: See if we should get align size from hal */
  123. uint32_t ring_base_align = 8;
  124. struct hal_srng_params ring_params;
  125. uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
  126. num_entries = (num_entries > max_entries) ? max_entries : num_entries;
  127. srng->hal_srng = NULL;
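/* Over-allocate by (ring_base_align - 1) bytes so that the ring base
 * address can be aligned to ring_base_align within the allocation
 */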
  128. srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
  129. srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
  130. soc->osdev, soc->osdev->dev, srng->alloc_size,
  131. &(srng->base_paddr_unaligned));
  132. if (!srng->base_vaddr_unaligned) {
  133. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  134. FL("alloc failed - ring_type: %d, ring_num %d"),
  135. ring_type, ring_num);
  136. return QDF_STATUS_E_NOMEM;
  137. }
  138. ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
  139. ((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
  140. ring_params.ring_base_paddr = srng->base_paddr_unaligned +
  141. ((unsigned long)(ring_params.ring_base_vaddr) -
  142. (unsigned long)srng->base_vaddr_unaligned);
  143. ring_params.num_entries = num_entries;
  144. /* TODO: Check MSI support and get MSI settings from HIF layer */
  145. ring_params.msi_data = 0;
  146. ring_params.msi_addr = 0;
  147. /* TODO: Setup interrupt timer and batch counter thresholds for
  148. * interrupt mitigation based on ring type
  149. */
  150. ring_params.intr_timer_thres_us = 8;
  151. ring_params.intr_batch_cntr_thres_entries = 1;
  152. /* TODO: Currently hal layer takes care of endianness related settings.
  153. * See if these settings need to passed from DP layer
  154. */
  155. ring_params.flags = 0;
  156. /* Enable low threshold interrupts for rx buffer rings (regular and
* monitor buffer rings).
  158. * TODO: See if this is required for any other ring
  159. */
  160. if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF)) {
  161. /* TODO: Setting low threshold to 1/8th of ring size
  162. * see if this needs to be configurable
  163. */
  164. ring_params.low_threshold = num_entries >> 3;
  165. ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
  166. }
  167. srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
  168. mac_id, &ring_params);
  169. return 0;
  170. }
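/*
 * Illustrative usage (mirrors the calls made later in this file): callers
 * pass the dp_srng container embedded in dp_soc/dp_pdev together with the
 * HAL ring type, ring number, mac id and requested entry count, and treat
 * any nonzero return as failure, e.g.
 *
 * if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
 *	REO_CMD_RING_SIZE))
 *	goto fail;
 */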
  171. /**
  172. * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
  173. * Any buffers allocated and attached to ring entries are expected to be freed
  174. * before calling this function.
  175. */
  176. static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
  177. int ring_type, int ring_num)
  178. {
  179. if (!srng->hal_srng) {
  180. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  181. FL("Ring type: %d, num:%d not setup"),
  182. ring_type, ring_num);
  183. return;
  184. }
  185. hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
  186. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  187. srng->alloc_size,
  188. srng->base_vaddr_unaligned,
  189. srng->base_paddr_unaligned, 0);
  190. }
  191. /* TODO: Need this interface from HIF */
  192. void *hif_get_hal_handle(void *hif_handle);
  193. /*
  194. * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
  195. * @dp_ctx: DP SOC handle
  196. * @budget: Number of frames/descriptors that can be processed in one shot
  197. *
* Return: number of frames/descriptors processed (budget consumed)
  199. */
  200. static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
  201. {
  202. struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
  203. struct dp_soc *soc = int_ctx->soc;
  204. int ring = 0;
  205. uint32_t work_done = 0;
  206. uint32_t budget = dp_budget;
  207. uint8_t tx_mask = int_ctx->tx_ring_mask;
  208. uint8_t rx_mask = int_ctx->rx_ring_mask;
  209. uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
  210. uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
  211. uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
  212. /* Process Tx completion interrupts first to return back buffers */
  213. if (tx_mask) {
  214. for (ring = 0; ring < soc->num_tcl_data_rings; ring++) {
  215. if (tx_mask & (1 << ring)) {
  216. work_done =
  217. dp_tx_comp_handler(soc, ring, budget);
  218. budget -= work_done;
  219. if (work_done)
  220. QDF_TRACE(QDF_MODULE_ID_DP,
  221. QDF_TRACE_LEVEL_INFO,
  222. "tx mask 0x%x ring %d,"
  223. "budget %d",
  224. tx_mask, ring, budget);
  225. if (budget <= 0)
  226. goto budget_done;
  227. }
  228. }
  229. }
  230. /* Process REO Exception ring interrupt */
  231. if (rx_err_mask) {
  232. work_done = dp_rx_err_process(soc,
  233. soc->reo_exception_ring.hal_srng, budget);
  234. budget -= work_done;
  235. if (work_done)
  236. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  237. "REO Exception Ring: work_done %d budget %d",
  238. work_done, budget);
  239. if (budget <= 0) {
  240. goto budget_done;
  241. }
  242. }
  243. /* Process Rx WBM release ring interrupt */
  244. if (rx_wbm_rel_mask) {
  245. work_done = dp_rx_wbm_err_process(soc,
  246. soc->rx_rel_ring.hal_srng, budget);
  247. budget -= work_done;
  248. if (work_done)
  249. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  250. "WBM Release Ring: work_done %d budget %d",
  251. work_done, budget);
  252. if (budget <= 0) {
  253. goto budget_done;
  254. }
  255. }
  256. /* Process Rx interrupts */
  257. if (rx_mask) {
  258. for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
  259. if (rx_mask & (1 << ring)) {
  260. work_done =
  261. dp_rx_process(soc,
  262. soc->reo_dest_ring[ring].hal_srng,
  263. budget);
  264. budget -= work_done;
  265. if (work_done)
  266. QDF_TRACE(QDF_MODULE_ID_DP,
  267. QDF_TRACE_LEVEL_INFO,
  268. "rx mask 0x%x ring %d,"
  269. "budget %d",
rx_mask, ring, budget);
  271. if (budget <= 0)
  272. goto budget_done;
  273. }
  274. }
  275. }
  276. if (reo_status_mask)
  277. dp_reo_status_ring_handler(soc);
  278. /* Process Rx monitor interrupts */
  279. for (ring = 0 ; ring < MAX_PDEV_CNT; ring++) {
  280. if (int_ctx->rx_mon_ring_mask & (1 << ring)) {
  281. work_done =
  282. dp_mon_process(soc, ring, budget);
  283. budget -= work_done;
  284. }
  285. }
  286. budget_done:
  287. return dp_budget - budget;
  288. }
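/*
 * dp_service_srngs() is registered as the ext group interrupt handler via
 * hif_register_ext_group_int_handler() in dp_soc_interrupt_attach() below,
 * and is also invoked periodically from dp_interrupt_timer() when
 * DP_INTR_POLL_BASED polling is used.
 */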
/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
  296. #ifdef DP_INTR_POLL_BASED
  297. static void dp_interrupt_timer(void *arg)
  298. {
  299. struct dp_soc *soc = (struct dp_soc *) arg;
  300. int i;
  301. if (qdf_atomic_read(&soc->cmn_init_done)) {
  302. for (i = 0;
  303. i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
  304. dp_service_srngs(&soc->intr_ctx[i], 0xffff);
  305. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  306. }
  307. }
  308. /*
  309. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  310. * @txrx_soc: DP SOC handle
  311. *
* Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" NAPI contexts.
* Each NAPI context will have a tx_ring_mask, rx_ring_mask and
* rx_mon_ring_mask to indicate the rings that are processed by the handler.
*
* Return: 0 for success, nonzero for failure.
  317. */
  318. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  319. {
  320. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  321. int i;
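/* In polling mode every interrupt context is programmed to service all
 * Tx/Rx rings; dp_interrupt_timer() above invokes dp_service_srngs() for
 * each context on every poll interval.
 */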
  322. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  323. soc->intr_ctx[i].tx_ring_mask = 0xF;
  324. soc->intr_ctx[i].rx_ring_mask = 0xF;
  325. soc->intr_ctx[i].rx_mon_ring_mask = 0x1;
  326. soc->intr_ctx[i].rx_err_ring_mask = 0x1;
  327. soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0x1;
  328. soc->intr_ctx[i].reo_status_ring_mask = 0x1;
  329. soc->intr_ctx[i].soc = soc;
  330. }
  331. qdf_timer_init(soc->osdev, &soc->int_timer,
  332. dp_interrupt_timer, (void *)soc,
  333. QDF_TIMER_TYPE_WAKE_APPS);
  334. return QDF_STATUS_SUCCESS;
  335. }
  336. /*
  337. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  338. * @txrx_soc: DP SOC handle
  339. *
  340. * Return: void
  341. */
  342. static void dp_soc_interrupt_detach(void *txrx_soc)
  343. {
  344. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  345. qdf_timer_stop(&soc->int_timer);
  346. qdf_timer_free(&soc->int_timer);
  347. }
  348. #else
  349. /*
  350. * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  351. * @txrx_soc: DP SOC handle
  352. *
* Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" NAPI contexts.
* Each NAPI context will have a tx_ring_mask, rx_ring_mask and
* rx_mon_ring_mask to indicate the rings that are processed by the handler.
*
* Return: 0 for success, nonzero for failure.
  358. */
  359. static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
  360. {
  361. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  362. int i = 0;
  363. int num_irq = 0;
  364. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  365. int j = 0;
  366. int ret = 0;
  367. /* Map of IRQ ids registered with one interrupt context */
  368. int irq_id_map[HIF_MAX_GRP_IRQ];
  369. int tx_mask =
  370. wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
  371. int rx_mask =
  372. wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
  373. int rx_mon_mask =
  374. wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
  375. soc->intr_ctx[i].tx_ring_mask = tx_mask;
  376. soc->intr_ctx[i].rx_ring_mask = rx_mask;
  377. soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
  378. soc->intr_ctx[i].soc = soc;
  379. num_irq = 0;
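/* IRQ ids for successive rings are assumed to be laid out consecutively
 * (descending) from the corresponding ring-1 id in the target interrupt
 * map, which is what the (ring1 - j) arithmetic below relies on.
 */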
  380. for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
  381. if (tx_mask & (1 << j)) {
  382. irq_id_map[num_irq++] =
  383. (wbm2host_tx_completions_ring1 - j);
  384. }
  385. if (rx_mask & (1 << j)) {
  386. irq_id_map[num_irq++] =
  387. (reo2host_destination_ring1 - j);
  388. }
  389. if (rx_mon_mask & (1 << j)) {
  390. irq_id_map[num_irq++] =
  391. (rxdma2host_monitor_destination_mac1
  392. - j);
  393. }
  394. }
  395. ret = hif_register_ext_group_int_handler(soc->hif_handle,
  396. num_irq, irq_id_map,
  397. dp_service_srngs,
  398. &soc->intr_ctx[i]);
  399. if (ret) {
  400. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  401. FL("failed, ret = %d"), ret);
  402. return QDF_STATUS_E_FAILURE;
  403. }
  404. }
  405. return QDF_STATUS_SUCCESS;
  406. }
  407. /*
  408. * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
  409. * @txrx_soc: DP SOC handle
  410. *
  411. * Return: void
  412. */
  413. static void dp_soc_interrupt_detach(void *txrx_soc)
  414. {
  415. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  416. int i;
  417. for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
  418. soc->intr_ctx[i].tx_ring_mask = 0;
  419. soc->intr_ctx[i].rx_ring_mask = 0;
  420. soc->intr_ctx[i].rx_mon_ring_mask = 0;
  421. }
  422. }
  423. #endif
  424. #define AVG_MAX_MPDUS_PER_TID 128
  425. #define AVG_TIDS_PER_CLIENT 2
  426. #define AVG_FLOWS_PER_TID 2
  427. #define AVG_MSDUS_PER_FLOW 128
  428. #define AVG_MSDUS_PER_MPDU 4
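/* These averages size the common link descriptor pool allocated in
 * dp_hw_link_desc_pool_setup() below.
 */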
  429. /*
  430. * Allocate and setup link descriptor pool that will be used by HW for
  431. * various link and queue descriptors and managed by WBM
  432. */
  433. static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
  434. {
  435. int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
  436. int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
  437. uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
  438. uint32_t num_mpdus_per_link_desc =
  439. hal_num_mpdus_per_link_desc(soc->hal_soc);
  440. uint32_t num_msdus_per_link_desc =
  441. hal_num_msdus_per_link_desc(soc->hal_soc);
  442. uint32_t num_mpdu_links_per_queue_desc =
  443. hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
  444. uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
  445. uint32_t total_link_descs, total_mem_size;
  446. uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
  447. uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
  448. uint32_t num_link_desc_banks;
  449. uint32_t last_bank_size = 0;
  450. uint32_t entry_size, num_entries;
  451. int i;
/* Only Tx queue descriptors are allocated from the common link descriptor
 * pool. Rx queue descriptors (REO queue extension descriptors) are not
 * included here because they are expected to be allocated contiguously
 * with the REO queue descriptors.
 */
  457. num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  458. AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
  459. num_mpdu_queue_descs = num_mpdu_link_descs /
  460. num_mpdu_links_per_queue_desc;
  461. num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  462. AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
  463. num_msdus_per_link_desc;
  464. num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
  465. AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
  466. num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
  467. num_tx_msdu_link_descs + num_rx_msdu_link_descs;
  468. /* Round up to power of 2 */
  469. total_link_descs = 1;
  470. while (total_link_descs < num_entries)
  471. total_link_descs <<= 1;
  472. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  473. FL("total_link_descs: %u, link_desc_size: %d"),
  474. total_link_descs, link_desc_size);
  475. total_mem_size = total_link_descs * link_desc_size;
  476. total_mem_size += link_desc_align;
  477. if (total_mem_size <= max_alloc_size) {
  478. num_link_desc_banks = 0;
  479. last_bank_size = total_mem_size;
  480. } else {
  481. num_link_desc_banks = (total_mem_size) /
  482. (max_alloc_size - link_desc_align);
  483. last_bank_size = total_mem_size %
  484. (max_alloc_size - link_desc_align);
  485. }
  486. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  487. FL("total_mem_size: %d, num_link_desc_banks: %u"),
  488. total_mem_size, num_link_desc_banks);
  489. for (i = 0; i < num_link_desc_banks; i++) {
  490. soc->link_desc_banks[i].base_vaddr_unaligned =
  491. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  492. max_alloc_size,
  493. &(soc->link_desc_banks[i].base_paddr_unaligned));
  494. soc->link_desc_banks[i].size = max_alloc_size;
  495. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
  496. soc->link_desc_banks[i].base_vaddr_unaligned) +
  497. ((unsigned long)(
  498. soc->link_desc_banks[i].base_vaddr_unaligned) %
  499. link_desc_align));
  500. soc->link_desc_banks[i].base_paddr = (unsigned long)(
  501. soc->link_desc_banks[i].base_paddr_unaligned) +
  502. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  503. (unsigned long)(
  504. soc->link_desc_banks[i].base_vaddr_unaligned));
  505. if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
  506. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  507. FL("Link descriptor memory alloc failed"));
  508. goto fail;
  509. }
  510. }
  511. if (last_bank_size) {
  512. /* Allocate last bank in case total memory required is not exact
  513. * multiple of max_alloc_size
  514. */
  515. soc->link_desc_banks[i].base_vaddr_unaligned =
  516. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  517. last_bank_size,
  518. &(soc->link_desc_banks[i].base_paddr_unaligned));
  519. soc->link_desc_banks[i].size = last_bank_size;
  520. soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
  521. (soc->link_desc_banks[i].base_vaddr_unaligned) +
  522. ((unsigned long)(
  523. soc->link_desc_banks[i].base_vaddr_unaligned) %
  524. link_desc_align));
  525. soc->link_desc_banks[i].base_paddr =
  526. (unsigned long)(
  527. soc->link_desc_banks[i].base_paddr_unaligned) +
  528. ((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
  529. (unsigned long)(
  530. soc->link_desc_banks[i].base_vaddr_unaligned));
  531. }
  532. /* Allocate and setup link descriptor idle list for HW internal use */
  533. entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
  534. total_mem_size = entry_size * total_link_descs;
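/* If the idle link list fits within a single allocation, set it up as a
 * WBM_IDLE_LINK SRNG and populate it directly; otherwise fall back to the
 * scatter-buffer based idle list in the else branch below.
 */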
  535. if (total_mem_size <= max_alloc_size) {
  536. void *desc;
  537. if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
  538. WBM_IDLE_LINK, 0, 0, total_link_descs)) {
  539. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  540. FL("Link desc idle ring setup failed"));
  541. goto fail;
  542. }
  543. hal_srng_access_start_unlocked(soc->hal_soc,
  544. soc->wbm_idle_link_ring.hal_srng);
  545. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  546. soc->link_desc_banks[i].base_paddr; i++) {
uint32_t num_entries = (soc->link_desc_banks[i].size -
((unsigned long)(
soc->link_desc_banks[i].base_vaddr) -
(unsigned long)(
soc->link_desc_banks[i].base_vaddr_unaligned)))
/ link_desc_size;
  553. unsigned long paddr = (unsigned long)(
  554. soc->link_desc_banks[i].base_paddr);
  555. while (num_entries && (desc = hal_srng_src_get_next(
  556. soc->hal_soc,
  557. soc->wbm_idle_link_ring.hal_srng))) {
  558. hal_set_link_desc_addr(desc, i, paddr);
  559. num_entries--;
  560. paddr += link_desc_size;
  561. }
  562. }
  563. hal_srng_access_end_unlocked(soc->hal_soc,
  564. soc->wbm_idle_link_ring.hal_srng);
  565. } else {
  566. uint32_t num_scatter_bufs;
  567. uint32_t num_entries_per_buf;
  568. uint32_t rem_entries;
  569. uint8_t *scatter_buf_ptr;
  570. uint16_t scatter_buf_num;
  571. soc->wbm_idle_scatter_buf_size =
  572. hal_idle_list_scatter_buf_size(soc->hal_soc);
  573. num_entries_per_buf = hal_idle_scatter_buf_num_entries(
  574. soc->hal_soc, soc->wbm_idle_scatter_buf_size);
  575. num_scatter_bufs = (total_mem_size /
soc->wbm_idle_scatter_buf_size) + ((total_mem_size %
soc->wbm_idle_scatter_buf_size) ? 1 : 0);
  578. for (i = 0; i < num_scatter_bufs; i++) {
  579. soc->wbm_idle_scatter_buf_base_vaddr[i] =
  580. qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
  581. soc->wbm_idle_scatter_buf_size,
  582. &(soc->wbm_idle_scatter_buf_base_paddr[i]));
  583. if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
  584. QDF_TRACE(QDF_MODULE_ID_DP,
  585. QDF_TRACE_LEVEL_ERROR,
  586. FL("Scatter list memory alloc failed"));
  587. goto fail;
  588. }
  589. }
  590. /* Populate idle list scatter buffers with link descriptor
  591. * pointers
  592. */
  593. scatter_buf_num = 0;
  594. scatter_buf_ptr = (uint8_t *)(
  595. soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
  596. rem_entries = num_entries_per_buf;
  597. for (i = 0; i < MAX_LINK_DESC_BANKS &&
  598. soc->link_desc_banks[i].base_paddr; i++) {
  599. uint32_t num_link_descs =
(soc->link_desc_banks[i].size -
((unsigned long)(
soc->link_desc_banks[i].base_vaddr) -
(unsigned long)(
soc->link_desc_banks[i].base_vaddr_unaligned))) /
link_desc_size;
  606. unsigned long paddr = (unsigned long)(
  607. soc->link_desc_banks[i].base_paddr);
  608. void *desc = NULL;
  609. while (num_link_descs && (desc =
  610. hal_srng_src_get_next(soc->hal_soc,
  611. soc->wbm_idle_link_ring.hal_srng))) {
  612. hal_set_link_desc_addr((void *)scatter_buf_ptr,
  613. i, paddr);
  614. num_link_descs--;
  615. paddr += link_desc_size;
  616. if (rem_entries) {
  617. rem_entries--;
  618. scatter_buf_ptr += link_desc_size;
  619. } else {
  620. rem_entries = num_entries_per_buf;
  621. scatter_buf_num++;
  622. scatter_buf_ptr = (uint8_t *)(
  623. soc->wbm_idle_scatter_buf_base_vaddr[
  624. scatter_buf_num]);
  625. }
  626. }
  627. }
  628. /* Setup link descriptor idle list in HW */
  629. hal_setup_link_idle_list(soc->hal_soc,
  630. soc->wbm_idle_scatter_buf_base_paddr,
  631. soc->wbm_idle_scatter_buf_base_vaddr,
  632. num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
  633. (uint32_t)(scatter_buf_ptr -
  634. (uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
  635. scatter_buf_num])));
  636. }
  637. return 0;
  638. fail:
  639. if (soc->wbm_idle_link_ring.hal_srng) {
dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  641. WBM_IDLE_LINK, 0);
  642. }
  643. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  644. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  645. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  646. soc->wbm_idle_scatter_buf_size,
  647. soc->wbm_idle_scatter_buf_base_vaddr[i],
  648. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  649. }
  650. }
  651. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  652. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  653. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  654. soc->link_desc_banks[i].size,
  655. soc->link_desc_banks[i].base_vaddr_unaligned,
  656. soc->link_desc_banks[i].base_paddr_unaligned,
  657. 0);
  658. }
  659. }
  660. return QDF_STATUS_E_FAILURE;
  661. }
  662. #ifdef notused
  663. /*
* Free the link descriptor pool that was set up for HW use
  665. */
  666. static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
  667. {
  668. int i;
  669. if (soc->wbm_idle_link_ring.hal_srng) {
dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
  671. WBM_IDLE_LINK, 0);
  672. }
  673. for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
  674. if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
  675. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  676. soc->wbm_idle_scatter_buf_size,
  677. soc->wbm_idle_scatter_buf_base_vaddr[i],
  678. soc->wbm_idle_scatter_buf_base_paddr[i], 0);
  679. }
  680. }
  681. for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
  682. if (soc->link_desc_banks[i].base_vaddr_unaligned) {
  683. qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
  684. soc->link_desc_banks[i].size,
  685. soc->link_desc_banks[i].base_vaddr_unaligned,
  686. soc->link_desc_banks[i].base_paddr_unaligned,
  687. 0);
  688. }
  689. }
  690. }
  691. #endif /* notused */
  692. /* TODO: Following should be configurable */
  693. #define WBM_RELEASE_RING_SIZE 64
  694. #define TCL_DATA_RING_SIZE 512
  695. #define TX_COMP_RING_SIZE 1024
  696. #define TCL_CMD_RING_SIZE 32
  697. #define TCL_STATUS_RING_SIZE 32
  698. #define REO_DST_RING_SIZE 2048
  699. #define REO_REINJECT_RING_SIZE 32
  700. #define RX_RELEASE_RING_SIZE 1024
  701. #define REO_EXCEPTION_RING_SIZE 128
  702. #define REO_CMD_RING_SIZE 32
  703. #define REO_STATUS_RING_SIZE 32
  704. #define RXDMA_BUF_RING_SIZE 1024
  705. #define RXDMA_REFILL_RING_SIZE 2048
  706. #define RXDMA_MONITOR_BUF_RING_SIZE 1024
  707. #define RXDMA_MONITOR_DST_RING_SIZE 1024
  708. #define RXDMA_MONITOR_STATUS_RING_SIZE 1024
  709. #define RXDMA_MONITOR_DESC_RING_SIZE 1024
  710. /*
* dp_soc_cmn_setup() - Common SoC level initialization
* @soc: Datapath SOC handle
*
* This is an internal function used to set up common SOC data structures,
* to be called from PDEV attach after receiving HW mode capabilities from FW
  716. */
  717. static int dp_soc_cmn_setup(struct dp_soc *soc)
  718. {
  719. int i;
  720. struct hal_reo_params reo_params;
  721. if (qdf_atomic_read(&soc->cmn_init_done))
  722. return 0;
  723. if (dp_peer_find_attach(soc))
  724. goto fail0;
  725. if (dp_hw_link_desc_pool_setup(soc))
  726. goto fail1;
  727. /* Setup SRNG rings */
  728. /* Common rings */
  729. if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
  730. WBM_RELEASE_RING_SIZE)) {
  731. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  732. FL("dp_srng_setup failed for wbm_desc_rel_ring"));
  733. goto fail1;
  734. }
  735. soc->num_tcl_data_rings = 0;
  736. /* Tx data rings */
  737. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  738. soc->num_tcl_data_rings =
  739. wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
  740. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  741. if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
  742. TCL_DATA, i, 0, TCL_DATA_RING_SIZE)) {
  743. QDF_TRACE(QDF_MODULE_ID_DP,
  744. QDF_TRACE_LEVEL_ERROR,
  745. FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
  746. goto fail1;
  747. }
  748. if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
  749. WBM2SW_RELEASE, i, 0, TX_COMP_RING_SIZE)) {
  750. QDF_TRACE(QDF_MODULE_ID_DP,
  751. QDF_TRACE_LEVEL_ERROR,
  752. FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
  753. goto fail1;
  754. }
  755. }
  756. } else {
  757. /* This will be incremented during per pdev ring setup */
  758. soc->num_tcl_data_rings = 0;
  759. }
  760. if (dp_tx_soc_attach(soc)) {
  761. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  762. FL("dp_tx_soc_attach failed"));
  763. goto fail1;
  764. }
  765. /* TCL command and status rings */
  766. if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
  767. TCL_CMD_RING_SIZE)) {
  768. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  769. FL("dp_srng_setup failed for tcl_cmd_ring"));
  770. goto fail1;
  771. }
  772. if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
  773. TCL_STATUS_RING_SIZE)) {
  774. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  775. FL("dp_srng_setup failed for tcl_status_ring"));
  776. goto fail1;
  777. }
  778. /* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
  779. * descriptors
  780. */
  781. /* Rx data rings */
  782. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  783. soc->num_reo_dest_rings =
  784. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  785. QDF_TRACE(QDF_MODULE_ID_DP,
  786. QDF_TRACE_LEVEL_ERROR,
  787. FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
  788. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  789. if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
  790. i, 0, REO_DST_RING_SIZE)) {
  791. QDF_TRACE(QDF_MODULE_ID_DP,
  792. QDF_TRACE_LEVEL_ERROR,
  793. FL("dp_srng_setup failed for reo_dest_ring[%d]"), i);
  794. goto fail1;
  795. }
  796. }
  797. } else {
  798. /* This will be incremented during per pdev ring setup */
  799. soc->num_reo_dest_rings = 0;
  800. }
  801. /* TBD: call dp_rx_init to setup Rx SW descriptors */
  802. /* REO reinjection ring */
  803. if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
  804. REO_REINJECT_RING_SIZE)) {
  805. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  806. FL("dp_srng_setup failed for reo_reinject_ring"));
  807. goto fail1;
  808. }
  809. /* Rx release ring */
  810. if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
  811. RX_RELEASE_RING_SIZE)) {
  812. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  813. FL("dp_srng_setup failed for rx_rel_ring"));
  814. goto fail1;
  815. }
  816. /* Rx exception ring */
  817. if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
  818. MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
  819. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  820. FL("dp_srng_setup failed for reo_exception_ring"));
  821. goto fail1;
  822. }
  823. /* REO command and status rings */
  824. if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
  825. REO_CMD_RING_SIZE)) {
  826. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  827. FL("dp_srng_setup failed for reo_cmd_ring"));
  828. goto fail1;
  829. }
  830. hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
  831. TAILQ_INIT(&soc->rx.reo_cmd_list);
  832. qdf_spinlock_create(&soc->rx.reo_cmd_lock);
  833. if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
  834. REO_STATUS_RING_SIZE)) {
  835. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  836. FL("dp_srng_setup failed for reo_status_ring"));
  837. goto fail1;
  838. }
  839. dp_soc_interrupt_attach(soc);
  840. /* Setup HW REO */
  841. qdf_mem_zero(&reo_params, sizeof(reo_params));
  842. if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx))
  843. reo_params.rx_hash_enabled = true;
  844. hal_reo_setup(soc->hal_soc, &reo_params);
  845. qdf_atomic_set(&soc->cmn_init_done, 1);
  846. return 0;
  847. fail1:
  848. /*
  849. * Cleanup will be done as part of soc_detach, which will
  850. * be called on pdev attach failure
  851. */
  852. fail0:
  853. return QDF_STATUS_E_FAILURE;
  854. }
  855. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
  856. static void dp_lro_hash_setup(struct dp_soc *soc)
  857. {
  858. struct cdp_lro_hash_config lro_hash;
  859. if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
  860. !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
  861. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  862. FL("LRO disabled RX hash disabled"));
  863. return;
  864. }
  865. qdf_mem_zero(&lro_hash, sizeof(lro_hash));
  866. if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx)) {
  867. lro_hash.lro_enable = 1;
  868. lro_hash.tcp_flag = QDF_TCPHDR_ACK;
  869. lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
  870. QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
  871. QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
  872. }
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, FL("LRO/RX hash config enabled"));
  874. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
  875. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  876. LRO_IPV4_SEED_ARR_SZ));
  877. qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
  878. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  879. LRO_IPV6_SEED_ARR_SZ));
  880. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  881. "lro_hash: lro_enable: 0x%x"
  882. "lro_hash: tcp_flag 0x%x tcp_flag_mask 0x%x",
  883. lro_hash.lro_enable, lro_hash.tcp_flag,
  884. lro_hash.tcp_flag_mask);
  885. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  886. FL("lro_hash: toeplitz_hash_ipv4:"));
  887. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  888. QDF_TRACE_LEVEL_ERROR,
  889. (void *)lro_hash.toeplitz_hash_ipv4,
  890. (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
  891. LRO_IPV4_SEED_ARR_SZ));
  892. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  893. FL("lro_hash: toeplitz_hash_ipv6:"));
  894. qdf_trace_hex_dump(QDF_MODULE_ID_DP,
  895. QDF_TRACE_LEVEL_ERROR,
  896. (void *)lro_hash.toeplitz_hash_ipv6,
  897. (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
  898. LRO_IPV6_SEED_ARR_SZ));
  899. qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
  900. if (soc->cdp_soc.ol_ops->lro_hash_config)
  901. (void)soc->cdp_soc.ol_ops->lro_hash_config
  902. (soc->osif_soc, &lro_hash);
  903. }
  904. /*
  905. * dp_rxdma_ring_setup() - configure the RX DMA rings
  906. * @soc: data path SoC handle
  907. * @pdev: Physical device handle
  908. *
  909. * Return: 0 - success, > 0 - failure
  910. */
  911. #ifdef QCA_HOST2FW_RXBUF_RING
  912. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  913. struct dp_pdev *pdev)
  914. {
  915. int max_mac_rings =
  916. wlan_cfg_get_num_mac_rings
  917. (pdev->wlan_cfg_ctx);
  918. int i;
  919. for (i = 0; i < max_mac_rings; i++) {
  920. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  921. "%s: pdev_id %d mac_id %d\n",
  922. __func__, pdev->pdev_id, i);
  923. if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
  924. RXDMA_BUF, 1, i, RXDMA_BUF_RING_SIZE)) {
  925. QDF_TRACE(QDF_MODULE_ID_DP,
  926. QDF_TRACE_LEVEL_ERROR,
  927. FL("failed rx mac ring setup"));
  928. return QDF_STATUS_E_FAILURE;
  929. }
  930. }
  931. return QDF_STATUS_SUCCESS;
  932. }
  933. #else
  934. static int dp_rxdma_ring_setup(struct dp_soc *soc,
  935. struct dp_pdev *pdev)
  936. {
  937. return QDF_STATUS_SUCCESS;
  938. }
  939. #endif
  940. /**
  941. * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
  942. * @pdev - DP_PDEV handle
  943. *
  944. * Return: void
  945. */
  946. static inline void
  947. dp_dscp_tid_map_setup(struct dp_pdev *pdev)
  948. {
  949. uint8_t map_id;
  950. for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
  951. qdf_mem_copy(pdev->dscp_tid_map[map_id], default_dscp_tid_map,
  952. sizeof(default_dscp_tid_map));
  953. }
  954. for (map_id = 0; map_id < HAL_MAX_HW_DSCP_TID_MAPS; map_id++) {
  955. hal_tx_set_dscp_tid_map(pdev->soc->hal_soc,
  956. pdev->dscp_tid_map[map_id],
  957. map_id);
  958. }
  959. }
  960. /*
  961. * dp_pdev_attach_wifi3() - attach txrx pdev
* @txrx_soc: Datapath SOC handle
* @ctrl_pdev: Opaque PDEV handle from OSIF/HDD
  964. * @htc_handle: HTC handle for host-target interface
  965. * @qdf_osdev: QDF OS device
  966. * @pdev_id: PDEV ID
  967. *
  968. * Return: DP PDEV handle on success, NULL on failure
  969. */
  970. static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
  971. struct cdp_cfg *ctrl_pdev,
  972. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
  973. {
  974. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  975. struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
  976. if (!pdev) {
  977. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  978. FL("DP PDEV memory allocation failed"));
  979. goto fail0;
  980. }
  981. pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach();
  982. if (!pdev->wlan_cfg_ctx) {
  983. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  984. FL("pdev cfg_attach failed"));
  985. qdf_mem_free(pdev);
  986. goto fail0;
  987. }
  988. pdev->soc = soc;
  989. pdev->osif_pdev = ctrl_pdev;
  990. pdev->pdev_id = pdev_id;
  991. soc->pdev_list[pdev_id] = pdev;
  992. TAILQ_INIT(&pdev->vdev_list);
  993. pdev->vdev_count = 0;
  994. if (dp_soc_cmn_setup(soc)) {
  995. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  996. FL("dp_soc_cmn_setup failed"));
  997. goto fail1;
  998. }
  999. /* Setup per PDEV TCL rings if configured */
  1000. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1001. if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
  1002. pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
  1003. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1004. FL("dp_srng_setup failed for tcl_data_ring"));
  1005. goto fail1;
  1006. }
  1007. if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
WBM2SW_RELEASE, pdev_id, pdev_id, TX_COMP_RING_SIZE)) {
  1009. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1010. FL("dp_srng_setup failed for tx_comp_ring"));
  1011. goto fail1;
  1012. }
  1013. soc->num_tcl_data_rings++;
  1014. }
  1015. /* Tx specific init */
  1016. if (dp_tx_pdev_attach(pdev)) {
  1017. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1018. FL("dp_tx_pdev_attach failed"));
  1019. goto fail1;
  1020. }
  1021. /* Setup per PDEV REO rings if configured */
  1022. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1023. if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
  1024. pdev_id, pdev_id, REO_DST_RING_SIZE)) {
  1025. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1026. FL("dp_srng_setup failed for reo_dest_ringn"));
  1027. goto fail1;
  1028. }
  1029. soc->num_reo_dest_rings++;
  1030. }
  1031. if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
  1032. RXDMA_REFILL_RING_SIZE)) {
  1033. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1034. FL("dp_srng_setup failed rx refill ring"));
  1035. goto fail1;
  1036. }
  1037. if (dp_rxdma_ring_setup(soc, pdev)) {
  1038. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1039. FL("RXDMA ring config failed"));
  1040. goto fail1;
  1041. }
  1042. if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
  1043. pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
  1044. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1045. FL("dp_srng_setup failed for rxdma_mon_buf_ring"));
  1046. goto fail1;
  1047. }
  1048. if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
  1049. pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
  1050. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1051. FL("dp_srng_setup failed for rxdma_mon_dst_ring"));
  1052. goto fail1;
  1053. }
  1054. if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
  1055. RXDMA_MONITOR_STATUS, 0, pdev_id,
  1056. RXDMA_MONITOR_STATUS_RING_SIZE)) {
  1057. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1058. FL("dp_srng_setup failed for rxdma_mon_status_ring"));
  1059. goto fail1;
  1060. }
  1061. if (dp_srng_setup(soc, &pdev->rxdma_mon_desc_ring,
  1062. RXDMA_MONITOR_DESC, 0, pdev_id, RXDMA_MONITOR_DESC_RING_SIZE)) {
  1063. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1064. "dp_srng_setup failed for rxdma_mon_desc_ring\n");
  1065. goto fail1;
  1066. }
  1067. /* Rx specific init */
  1068. if (dp_rx_pdev_attach(pdev)) {
  1069. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1070. FL("dp_rx_pdev_attach failed "));
  1071. goto fail0;
  1072. }
  1073. DP_STATS_INIT(pdev);
  1074. #ifndef CONFIG_WIN
  1075. /* MCL */
  1076. dp_local_peer_id_pool_init(pdev);
  1077. #endif
  1078. dp_dscp_tid_map_setup(pdev);
  1079. /* Rx monitor mode specific init */
  1080. if (dp_rx_pdev_mon_attach(pdev)) {
  1081. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1082. "dp_rx_pdev_attach failed\n");
  1083. goto fail0;
  1084. }
  1085. return (struct cdp_pdev *)pdev;
  1086. fail1:
  1087. dp_pdev_detach_wifi3((struct cdp_pdev *)pdev, 0);
  1088. fail0:
  1089. return NULL;
  1090. }
  1091. /*
* dp_rxdma_ring_cleanup() - clean up the RX DMA rings
  1093. * @soc: data path SoC handle
  1094. * @pdev: Physical device handle
  1095. *
  1096. * Return: void
  1097. */
  1098. #ifdef QCA_HOST2FW_RXBUF_RING
  1099. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  1100. struct dp_pdev *pdev)
  1101. {
  1102. int max_mac_rings =
  1103. wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
  1104. int i;
  1105. max_mac_rings = max_mac_rings < MAX_RX_MAC_RINGS ?
  1106. max_mac_rings : MAX_RX_MAC_RINGS;
for (i = 0; i < max_mac_rings; i++)
  1108. dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
  1109. RXDMA_BUF, 1);
  1110. }
  1111. #else
  1112. static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
  1113. struct dp_pdev *pdev)
  1114. {
  1115. }
  1116. #endif
  1117. /*
  1118. * dp_pdev_detach_wifi3() - detach txrx pdev
  1119. * @txrx_pdev: Datapath PDEV handle
  1120. * @force: Force detach
  1121. *
  1122. */
  1123. static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
  1124. {
  1125. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  1126. struct dp_soc *soc = pdev->soc;
  1127. dp_tx_pdev_detach(pdev);
  1128. if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1129. dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
  1130. TCL_DATA, pdev->pdev_id);
  1131. dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
  1132. WBM2SW_RELEASE, pdev->pdev_id);
  1133. }
  1134. dp_rx_pdev_detach(pdev);
  1135. dp_rx_pdev_mon_detach(pdev);
/* Cleanup per PDEV REO rings if configured */
  1137. if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1138. dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
  1139. REO_DST, pdev->pdev_id);
  1140. }
  1141. dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
  1142. dp_rxdma_ring_cleanup(soc, pdev);
  1143. dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
  1144. dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
  1145. dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
  1146. RXDMA_MONITOR_STATUS, 0);
  1147. dp_srng_cleanup(soc, &pdev->rxdma_mon_desc_ring,
  1148. RXDMA_MONITOR_DESC, 0);
  1149. soc->pdev_list[pdev->pdev_id] = NULL;
  1150. qdf_mem_free(pdev);
  1151. }
  1152. /*
  1153. * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  1154. * @soc: DP SOC handle
  1155. */
  1156. static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
  1157. {
  1158. struct reo_desc_list_node *desc;
  1159. struct dp_rx_tid *rx_tid;
  1160. qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
  1161. while (qdf_list_remove_front(&soc->reo_desc_freelist,
  1162. (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
  1163. rx_tid = &desc->rx_tid;
  1164. qdf_mem_unmap_nbytes_single(soc->osdev,
  1165. rx_tid->hw_qdesc_paddr,
  1166. QDF_DMA_BIDIRECTIONAL,
  1167. rx_tid->hw_qdesc_alloc_size);
  1168. qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
  1169. qdf_mem_free(desc);
  1170. }
  1171. qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
  1172. qdf_list_destroy(&soc->reo_desc_freelist);
  1173. qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
  1174. }
  1175. /*
  1176. * dp_soc_detach_wifi3() - Detach txrx SOC
  1177. * @txrx_soc: DP SOC handle
  1178. *
  1179. */
  1180. static void dp_soc_detach_wifi3(void *txrx_soc)
  1181. {
  1182. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  1183. int i;
  1184. qdf_atomic_set(&soc->cmn_init_done, 0);
  1185. dp_soc_interrupt_detach(soc);
  1186. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1187. if (soc->pdev_list[i])
  1188. dp_pdev_detach_wifi3(
  1189. (struct cdp_pdev *)soc->pdev_list[i], 1);
  1190. }
  1191. dp_peer_find_detach(soc);
  1192. /* TBD: Call Tx and Rx cleanup functions to free buffers and
  1193. * SW descriptors
  1194. */
  1195. /* Free the ring memories */
  1196. /* Common rings */
  1197. dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
  1198. /* Tx data rings */
  1199. if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
  1200. dp_tx_soc_detach(soc);
  1201. for (i = 0; i < soc->num_tcl_data_rings; i++) {
  1202. dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
  1203. TCL_DATA, i);
  1204. dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
  1205. WBM2SW_RELEASE, i);
  1206. }
  1207. }
  1208. /* TCL command and status rings */
  1209. dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
  1210. dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
  1211. /* Rx data rings */
  1212. if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
  1213. soc->num_reo_dest_rings =
  1214. wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
  1215. for (i = 0; i < soc->num_reo_dest_rings; i++) {
  1216. /* TODO: Get number of rings and ring sizes
  1217. * from wlan_cfg
  1218. */
  1219. dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
  1220. REO_DST, i);
  1221. }
  1222. }
  1223. /* REO reinjection ring */
  1224. dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
  1225. /* Rx release ring */
  1226. dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
  1227. /* Rx exception ring */
  1228. /* TODO: Better to store ring_type and ring_num in
  1229. * dp_srng during setup
  1230. */
  1231. dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
  1232. /* REO command and status rings */
  1233. dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
  1234. dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
  1235. qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
  1236. qdf_spinlock_destroy(&soc->peer_ref_mutex);
  1237. htt_soc_detach(soc->htt_handle);
  1238. dp_reo_desc_freelist_destroy(soc);
  1239. }
  1240. /*
  1241. * dp_rxdma_ring_config() - configure the RX DMA rings
  1242. *
1243. * This function configures the MAC rings for every attached pdev.
1244. * On MCL, the host provides buffers in the Host2FW ring; the FW
1245. * refills (copies) buffers into the ring and updates the ring_idx
1246. * in a register.
1247. *
1248. * @soc: data path SoC handle
  1250. *
  1251. * Return: void
  1252. */
  1253. #ifdef QCA_HOST2FW_RXBUF_RING
  1254. static void dp_rxdma_ring_config(struct dp_soc *soc)
  1255. {
  1256. int i;
  1257. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1258. struct dp_pdev *pdev = soc->pdev_list[i];
  1259. if (pdev) {
  1260. int mac_id = 0;
  1261. int j;
  1262. bool dbs_enable = 0;
  1263. int max_mac_rings =
  1264. wlan_cfg_get_num_mac_rings
  1265. (pdev->wlan_cfg_ctx);
  1266. htt_srng_setup(soc->htt_handle, 0,
  1267. pdev->rx_refill_buf_ring.hal_srng,
  1268. RXDMA_BUF);
  1269. if (soc->cdp_soc.ol_ops->
  1270. is_hw_dbs_2x2_capable) {
  1271. dbs_enable = soc->cdp_soc.ol_ops->
  1272. is_hw_dbs_2x2_capable();
  1273. }
  1274. if (dbs_enable) {
  1275. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1276. QDF_TRACE_LEVEL_ERROR,
  1277. FL("DBS enabled max_mac_rings %d\n"),
  1278. max_mac_rings);
  1279. } else {
  1280. max_mac_rings = 1;
  1281. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1282. QDF_TRACE_LEVEL_ERROR,
  1283. FL("DBS disabled, max_mac_rings %d\n"),
  1284. max_mac_rings);
  1285. }
  1286. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1287. FL("pdev_id %d max_mac_rings %d\n"),
  1288. pdev->pdev_id, max_mac_rings);
  1289. for (j = 0; j < max_mac_rings; j++) {
  1290. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1291. QDF_TRACE_LEVEL_ERROR,
  1292. FL("mac_id %d\n"), mac_id);
  1293. htt_srng_setup(soc->htt_handle, mac_id,
  1294. pdev->rx_mac_buf_ring[j]
  1295. .hal_srng,
  1296. RXDMA_BUF);
  1297. mac_id++;
  1298. }
  1299. }
  1300. }
  1301. }
  1302. #else
  1303. static void dp_rxdma_ring_config(struct dp_soc *soc)
  1304. {
  1305. int i;
  1306. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1307. struct dp_pdev *pdev = soc->pdev_list[i];
  1308. if (pdev) {
  1309. htt_srng_setup(soc->htt_handle, i,
  1310. pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
  1311. htt_srng_setup(soc->htt_handle, i,
  1312. pdev->rxdma_mon_buf_ring.hal_srng,
  1313. RXDMA_MONITOR_BUF);
  1314. htt_srng_setup(soc->htt_handle, i,
  1315. pdev->rxdma_mon_dst_ring.hal_srng,
  1316. RXDMA_MONITOR_DST);
  1317. htt_srng_setup(soc->htt_handle, i,
  1318. pdev->rxdma_mon_status_ring.hal_srng,
  1319. RXDMA_MONITOR_STATUS);
  1320. htt_srng_setup(soc->htt_handle, i,
  1321. pdev->rxdma_mon_desc_ring.hal_srng,
  1322. RXDMA_MONITOR_DESC);
  1323. }
  1324. }
  1325. }
  1326. #endif
  1327. /*
  1328. * dp_soc_attach_target_wifi3() - SOC initialization in the target
  1329. * @txrx_soc: Datapath SOC handle
  1330. */
  1331. static int dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
  1332. {
  1333. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  1334. htt_soc_attach_target(soc->htt_handle);
  1335. dp_rxdma_ring_config(soc);
  1336. DP_STATS_INIT(soc);
  1337. return 0;
  1338. }
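/*
 * Editorial usage sketch (not part of the driver): this routine is reached
 * through the common ops table as txrx_soc_attach_target, and assumes the
 * pdev(s) have already been attached so that dp_rxdma_ring_config() finds
 * valid entries in soc->pdev_list when programming the RXDMA rings over HTT.
 * A hypothetical bring-up order:
 *
 *	... attach pdev(s) via dp_ops_cmn.txrx_pdev_attach ...
 *	dp_ops_cmn.txrx_soc_attach_target((struct cdp_soc_t *)soc);
 */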
  1339. /*
  1340. * dp_vdev_attach_wifi3() - attach txrx vdev
  1341. * @txrx_pdev: Datapath PDEV handle
  1342. * @vdev_mac_addr: MAC address of the virtual interface
  1343. * @vdev_id: VDEV Id
  1344. * @wlan_op_mode: VDEV operating mode
  1345. *
  1346. * Return: DP VDEV handle on success, NULL on failure
  1347. */
  1348. static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
  1349. uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
  1350. {
  1351. struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
  1352. struct dp_soc *soc = pdev->soc;
  1353. struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
  1354. if (!vdev) {
  1355. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1356. FL("DP VDEV memory allocation failed"));
  1357. goto fail0;
  1358. }
  1359. vdev->pdev = pdev;
  1360. vdev->vdev_id = vdev_id;
  1361. vdev->opmode = op_mode;
  1362. vdev->osdev = soc->osdev;
  1363. vdev->osif_rx = NULL;
  1364. vdev->osif_rsim_rx_decap = NULL;
  1365. vdev->osif_rx_mon = NULL;
  1366. vdev->osif_tx_free_ext = NULL;
  1367. vdev->osif_vdev = NULL;
  1368. vdev->delete.pending = 0;
  1369. vdev->safemode = 0;
  1370. vdev->drop_unenc = 1;
  1371. #ifdef notyet
  1372. vdev->filters_num = 0;
  1373. #endif
  1374. qdf_mem_copy(
  1375. &vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  1376. vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  1377. vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
  1378. vdev->dscp_tid_map_id = 0;
  1379. /* TODO: Initialize default HTT meta data that will be used in
  1380. * TCL descriptors for packets transmitted from this VDEV
  1381. */
  1382. TAILQ_INIT(&vdev->peer_list);
  1383. /* add this vdev into the pdev's list */
  1384. TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
  1385. pdev->vdev_count++;
  1386. dp_tx_vdev_attach(vdev);
  1387. #ifdef DP_INTR_POLL_BASED
  1388. if (pdev->vdev_count == 1)
  1389. qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
  1390. #endif
  1391. dp_lro_hash_setup(soc);
  1392. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1393. "Created vdev %p (%pM)", vdev, vdev->mac_addr.raw);
  1394. DP_STATS_INIT(vdev);
  1395. return (struct cdp_vdev *)vdev;
  1396. fail0:
  1397. return NULL;
  1398. }
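/*
 * Editorial usage sketch (not part of the driver): a hypothetical OSIF
 * caller creates a vdev through the common ops table and then registers
 * its callbacks with dp_vdev_register_wifi3() below; vdev_mac and vdev_id
 * are placeholder values.
 *
 *	struct cdp_vdev *vdev_hdl;
 *
 *	vdev_hdl = dp_ops_cmn.txrx_vdev_attach(pdev_hdl, vdev_mac, vdev_id,
 *					       wlan_op_mode_sta);
 *	if (!vdev_hdl)
 *		return QDF_STATUS_E_FAILURE;
 */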
  1399. /**
  1400. * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
  1401. * @vdev: Datapath VDEV handle
  1402. * @osif_vdev: OSIF vdev handle
  1403. * @txrx_ops: Tx and Rx operations
  1404. *
1405. * Return: void
  1406. */
  1407. static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
  1408. void *osif_vdev,
  1409. struct ol_txrx_ops *txrx_ops)
  1410. {
  1411. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1412. vdev->osif_vdev = osif_vdev;
  1413. vdev->osif_rx = txrx_ops->rx.rx;
  1414. vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
  1415. vdev->osif_rx_mon = txrx_ops->rx.mon;
  1416. vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
  1417. #ifdef notyet
  1418. #if ATH_SUPPORT_WAPI
  1419. vdev->osif_check_wai = txrx_ops->rx.wai_check;
  1420. #endif
  1421. #if UMAC_SUPPORT_PROXY_ARP
  1422. vdev->osif_proxy_arp = txrx_ops->proxy_arp;
  1423. #endif
  1424. #endif
  1425. /* TODO: Enable the following once Tx code is integrated */
  1426. txrx_ops->tx.tx = dp_tx_send;
  1427. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1428. "DP Vdev Register success");
  1429. }
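/*
 * Editorial usage sketch (not part of the driver): the OSIF layer fills a
 * struct ol_txrx_ops with its receive handler before calling in, and uses
 * the tx.tx entry filled in the same structure (dp_tx_send here) for
 * transmit; osif_dev and osif_rx_handler are hypothetical names.
 *
 *	struct ol_txrx_ops txrx_ops = {0};
 *
 *	txrx_ops.rx.rx = osif_rx_handler;
 *	dp_vdev_register_wifi3(vdev_hdl, osif_dev, &txrx_ops);
 */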
  1430. /*
  1431. * dp_vdev_detach_wifi3() - Detach txrx vdev
  1432. * @txrx_vdev: Datapath VDEV handle
  1433. * @callback: Callback OL_IF on completion of detach
  1434. * @cb_context: Callback context
  1435. *
  1436. */
  1437. static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
  1438. ol_txrx_vdev_delete_cb callback, void *cb_context)
  1439. {
  1440. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1441. struct dp_pdev *pdev = vdev->pdev;
  1442. struct dp_soc *soc = pdev->soc;
  1443. /* preconditions */
  1444. qdf_assert(vdev);
  1445. /* remove the vdev from its parent pdev's list */
  1446. TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
  1447. /*
  1448. * Use peer_ref_mutex while accessing peer_list, in case
  1449. * a peer is in the process of being removed from the list.
  1450. */
  1451. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1452. /* check that the vdev has no peers allocated */
  1453. if (!TAILQ_EMPTY(&vdev->peer_list)) {
  1454. /* debug print - will be removed later */
  1455. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  1456. FL("not deleting vdev object %p (%pM)"
  1457. "until deletion finishes for all its peers"),
  1458. vdev, vdev->mac_addr.raw);
  1459. /* indicate that the vdev needs to be deleted */
  1460. vdev->delete.pending = 1;
  1461. vdev->delete.callback = callback;
  1462. vdev->delete.context = cb_context;
  1463. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1464. return;
  1465. }
  1466. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1467. dp_tx_vdev_detach(vdev);
  1468. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1469. FL("deleting vdev object %p (%pM)"), vdev, vdev->mac_addr.raw);
  1470. qdf_mem_free(vdev);
  1471. if (callback)
  1472. callback(cb_context);
  1473. }
  1474. /*
  1475. * dp_peer_create_wifi3() - attach txrx peer
  1476. * @txrx_vdev: Datapath VDEV handle
  1477. * @peer_mac_addr: Peer MAC address
  1478. *
1479. * Return: DP peer handle on success, NULL on failure
  1480. */
  1481. static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
  1482. uint8_t *peer_mac_addr)
  1483. {
  1484. struct dp_peer *peer;
  1485. int i;
  1486. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1487. struct dp_pdev *pdev;
  1488. struct dp_soc *soc;
  1489. /* preconditions */
  1490. qdf_assert(vdev);
  1491. qdf_assert(peer_mac_addr);
  1492. pdev = vdev->pdev;
  1493. soc = pdev->soc;
  1494. #ifdef notyet
  1495. peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
  1496. soc->mempool_ol_ath_peer);
  1497. #else
  1498. peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
  1499. #endif
  1500. if (!peer)
  1501. return NULL; /* failure */
  1502. qdf_mem_zero(peer, sizeof(struct dp_peer));
  1503. TAILQ_INIT(&peer->ast_entry_list);
  1504. qdf_mem_copy(&peer->self_ast_entry.mac_addr, peer_mac_addr,
  1505. DP_MAC_ADDR_LEN);
  1506. peer->self_ast_entry.peer = peer;
  1507. TAILQ_INSERT_TAIL(&peer->ast_entry_list, &peer->self_ast_entry,
  1508. ast_entry_elem);
  1509. qdf_spinlock_create(&peer->peer_info_lock);
  1510. /* store provided params */
  1511. peer->vdev = vdev;
  1512. qdf_mem_copy(
  1513. &peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
  1514. /* TODO: See of rx_opt_proc is really required */
  1515. peer->rx_opt_proc = soc->rx_opt_proc;
  1516. /* initialize the peer_id */
  1517. for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
  1518. peer->peer_ids[i] = HTT_INVALID_PEER;
  1519. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1520. qdf_atomic_init(&peer->ref_cnt);
  1521. /* keep one reference for attach */
  1522. qdf_atomic_inc(&peer->ref_cnt);
  1523. /* add this peer into the vdev's list */
  1524. TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
  1525. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1526. /* TODO: See if hash based search is required */
  1527. dp_peer_find_hash_add(soc, peer);
  1528. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1529. "vdev %p created peer %p (%pM) ref_cnt: %d",
  1530. vdev, peer, peer->mac_addr.raw,
  1531. qdf_atomic_read(&peer->ref_cnt));
  1532. /*
1533. * Mark this peer as the bss_peer if its MAC address matches the vdev's
1534. */
1535. if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, DP_MAC_ADDR_LEN) == 0) {
  1536. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1537. "vdev bss_peer!!!!");
  1538. peer->bss_peer = 1;
  1539. vdev->vap_bss_peer = peer;
  1540. }
  1541. #ifndef CONFIG_WIN
  1542. dp_local_peer_id_alloc(pdev, peer);
  1543. #endif
  1544. DP_STATS_INIT(peer);
  1545. return (void *)peer;
  1546. }
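/*
 * Editorial usage sketch (not part of the driver): creation leaves the peer
 * with one reference (ref_cnt is initialized and incremented once for the
 * attach); dp_peer_setup_wifi3() then initializes Rx state and default
 * routing, and dp_peer_delete_wifi3() later drops the attach reference.
 * peer_mac is a placeholder.
 *
 *	void *peer_hdl = dp_peer_create_wifi3(vdev_hdl, peer_mac);
 *
 *	if (peer_hdl)
 *		dp_peer_setup_wifi3(vdev_hdl, peer_hdl);
 */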
  1547. /*
  1548. * dp_peer_setup_wifi3() - initialize the peer
  1549. * @vdev_hdl: virtual device object
  1550. * @peer: Peer object
  1551. *
  1552. * Return: void
  1553. */
  1554. static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
  1555. {
  1556. struct dp_peer *peer = (struct dp_peer *)peer_hdl;
  1557. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  1558. struct dp_pdev *pdev;
  1559. struct dp_soc *soc;
  1560. bool hash_based = 0;
  1561. /* preconditions */
  1562. qdf_assert(vdev);
  1563. qdf_assert(peer);
  1564. pdev = vdev->pdev;
  1565. soc = pdev->soc;
  1566. dp_peer_rx_init(pdev, peer);
  1567. peer->last_assoc_rcvd = 0;
  1568. peer->last_disassoc_rcvd = 0;
  1569. peer->last_deauth_rcvd = 0;
  1570. hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
  1571. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  1572. FL("hash based steering %d\n"), hash_based);
  1573. if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
  1574. /* TODO: Check the destination ring number to be passed to FW */
  1575. soc->cdp_soc.ol_ops->peer_set_default_routing(
  1576. pdev->osif_pdev, peer->mac_addr.raw,
  1577. peer->vdev->vdev_id, hash_based, 1);
  1578. }
  1579. return;
  1580. }
  1581. /*
  1582. * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
  1583. * @vdev_handle: virtual device object
  1584. * @htt_pkt_type: type of pkt
  1585. *
  1586. * Return: void
  1587. */
  1588. static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
  1589. enum htt_cmn_pkt_type val)
  1590. {
  1591. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1592. vdev->tx_encap_type = val;
  1593. }
  1594. /*
  1595. * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
  1596. * @vdev_handle: virtual device object
  1597. * @htt_pkt_type: type of pkt
  1598. *
  1599. * Return: void
  1600. */
  1601. static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
  1602. enum htt_cmn_pkt_type val)
  1603. {
  1604. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1605. vdev->rx_decap_type = val;
  1606. }
  1607. /*
  1608. * dp_peer_authorize() - authorize txrx peer
  1609. * @peer_handle: Datapath peer handle
1610. * @authorize: 0 to revoke authorization, non-zero to authorize the peer
  1611. *
  1612. */
  1613. static void dp_peer_authorize(void *peer_handle, uint32_t authorize)
  1614. {
  1615. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1616. struct dp_soc *soc;
  1617. if (peer != NULL) {
  1618. soc = peer->vdev->pdev->soc;
  1619. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1620. peer->authorize = authorize ? 1 : 0;
  1621. #ifdef notyet /* ATH_BAND_STEERING */
  1622. peer->peer_bs_inact_flag = 0;
  1623. peer->peer_bs_inact = soc->pdev_bs_inact_reload;
  1624. #endif
  1625. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1626. }
  1627. }
  1628. /*
  1629. * dp_peer_unref_delete() - unref and delete peer
  1630. * @peer_handle: Datapath peer handle
  1631. *
  1632. */
  1633. void dp_peer_unref_delete(void *peer_handle)
  1634. {
  1635. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1636. struct dp_vdev *vdev = peer->vdev;
  1637. struct dp_pdev *pdev = vdev->pdev;
  1638. struct dp_soc *soc = pdev->soc;
  1639. struct dp_peer *tmppeer;
  1640. int found = 0;
  1641. uint16_t peer_id;
  1642. uint16_t hw_peer_id;
  1643. struct dp_ast_entry *ast_entry;
  1644. /*
  1645. * Hold the lock all the way from checking if the peer ref count
  1646. * is zero until the peer references are removed from the hash
  1647. * table and vdev list (if the peer ref count is zero).
  1648. * This protects against a new HL tx operation starting to use the
  1649. * peer object just after this function concludes it's done being used.
  1650. * Furthermore, the lock needs to be held while checking whether the
  1651. * vdev's list of peers is empty, to make sure that list is not modified
  1652. * concurrently with the empty check.
  1653. */
  1654. qdf_spin_lock_bh(&soc->peer_ref_mutex);
  1655. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1656. "%s: peer %p ref_cnt(before decrement): %d\n", __func__,
  1657. peer, qdf_atomic_read(&peer->ref_cnt));
  1658. if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
  1659. peer_id = peer->peer_ids[0];
  1660. /*
  1661. * Make sure that the reference to the peer in
  1662. * peer object map is removed
  1663. */
  1664. if (peer_id != HTT_INVALID_PEER)
  1665. soc->peer_id_to_obj_map[peer_id] = NULL;
  1666. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1667. "Deleting peer %p (%pM)", peer, peer->mac_addr.raw);
  1668. /* remove the reference to the peer from the hash table */
  1669. dp_peer_find_hash_remove(soc, peer);
  1670. TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
  1671. if (tmppeer == peer) {
  1672. found = 1;
  1673. break;
  1674. }
  1675. }
  1676. if (found) {
  1677. TAILQ_REMOVE(&peer->vdev->peer_list, peer,
  1678. peer_list_elem);
  1679. } else {
  1680. /*Ignoring the remove operation as peer not found*/
  1681. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
  1682. "peer %p not found in vdev (%p)->peer_list:%p",
  1683. peer, vdev, &peer->vdev->peer_list);
  1684. }
  1685. /* cleanup the peer data */
  1686. dp_peer_cleanup(vdev, peer);
  1687. /* check whether the parent vdev has no peers left */
  1688. if (TAILQ_EMPTY(&vdev->peer_list)) {
  1689. /*
  1690. * Now that there are no references to the peer, we can
  1691. * release the peer reference lock.
  1692. */
  1693. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1694. /*
  1695. * Check if the parent vdev was waiting for its peers
  1696. * to be deleted, in order for it to be deleted too.
  1697. */
  1698. if (vdev->delete.pending) {
  1699. ol_txrx_vdev_delete_cb vdev_delete_cb =
  1700. vdev->delete.callback;
  1701. void *vdev_delete_context =
  1702. vdev->delete.context;
  1703. QDF_TRACE(QDF_MODULE_ID_DP,
  1704. QDF_TRACE_LEVEL_INFO_HIGH,
  1705. FL("deleting vdev object %p (%pM)"
  1706. " - its last peer is done"),
  1707. vdev, vdev->mac_addr.raw);
  1708. /* all peers are gone, go ahead and delete it */
  1709. qdf_mem_free(vdev);
  1710. if (vdev_delete_cb)
  1711. vdev_delete_cb(vdev_delete_context);
  1712. }
  1713. } else {
  1714. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1715. }
  1716. #ifdef notyet
  1717. qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
  1718. #else
  1719. TAILQ_FOREACH(ast_entry, &peer->ast_entry_list,
  1720. ast_entry_elem) {
  1721. hw_peer_id = ast_entry->ast_idx;
  1722. if (peer->self_ast_entry.ast_idx != hw_peer_id)
  1723. qdf_mem_free(ast_entry);
  1724. else
  1725. peer->self_ast_entry.ast_idx =
  1726. HTT_INVALID_PEER;
  1727. soc->ast_table[hw_peer_id] = NULL;
  1728. }
  1729. qdf_mem_free(peer);
  1730. #endif
  1731. if (soc->cdp_soc.ol_ops->peer_unref_delete) {
  1732. soc->cdp_soc.ol_ops->peer_unref_delete(pdev->osif_pdev,
  1733. vdev->vdev_id, peer->mac_addr.raw);
  1734. }
  1735. } else {
  1736. qdf_spin_unlock_bh(&soc->peer_ref_mutex);
  1737. }
  1738. }
  1739. /*
1740. * dp_peer_delete_wifi3() - Detach txrx peer
  1741. * @peer_handle: Datapath peer handle
  1742. *
  1743. */
  1744. static void dp_peer_delete_wifi3(void *peer_handle)
  1745. {
  1746. struct dp_peer *peer = (struct dp_peer *)peer_handle;
  1747. /* redirect the peer's rx delivery function to point to a
  1748. * discard func
  1749. */
  1750. peer->rx_opt_proc = dp_rx_discard;
  1751. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1752. FL("peer %p (%pM)"), peer, peer->mac_addr.raw);
  1753. #ifndef CONFIG_WIN
  1754. dp_local_peer_id_free(peer->vdev->pdev, peer);
  1755. #endif
  1756. qdf_spinlock_destroy(&peer->peer_info_lock);
  1757. /*
  1758. * Remove the reference added during peer_attach.
  1759. * The peer will still be left allocated until the
  1760. * PEER_UNMAP message arrives to remove the other
  1761. * reference, added by the PEER_MAP message.
  1762. */
  1763. dp_peer_unref_delete(peer_handle);
  1764. }
  1765. /*
1766. * dp_get_vdev_mac_addr_wifi3() - get MAC address of the vdev
1767. * @pvdev: Datapath VDEV handle
  1768. *
  1769. */
  1770. static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
  1771. {
  1772. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  1773. return vdev->mac_addr.raw;
  1774. }
  1775. /*
1776. * dp_get_vdev_from_vdev_id_wifi3() - get vdev handle from vdev_id
1777. * @dev: Datapath PDEV handle, @vdev_id: id of the virtual device
  1778. *
  1779. */
  1780. static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
  1781. uint8_t vdev_id)
  1782. {
  1783. struct dp_pdev *pdev = (struct dp_pdev *)dev;
  1784. struct dp_vdev *vdev = NULL;
  1785. if (qdf_unlikely(!pdev))
  1786. return NULL;
  1787. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1788. if (vdev->vdev_id == vdev_id)
  1789. break;
  1790. }
  1791. return (struct cdp_vdev *)vdev;
  1792. }
  1793. static int dp_get_opmode(struct cdp_vdev *vdev_handle)
  1794. {
  1795. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1796. return vdev->opmode;
  1797. }
  1798. static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
  1799. {
  1800. struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
  1801. struct dp_pdev *pdev = vdev->pdev;
  1802. return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
  1803. }
  1804. /**
  1805. * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  1806. * @vdev_handle: Datapath VDEV handle
  1807. *
  1808. * Return: 0 on success, not 0 on failure
  1809. */
  1810. static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle)
  1811. {
1812. /* Many monitor VAPs can exist in a system, but only one can be up at
1813. * any time
  1814. */
  1815. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  1816. struct dp_pdev *pdev;
  1817. struct htt_rx_ring_tlv_filter htt_tlv_filter;
  1818. struct dp_soc *soc;
  1819. uint8_t pdev_id;
  1820. qdf_assert(vdev);
  1821. pdev = vdev->pdev;
  1822. pdev_id = pdev->pdev_id;
  1823. soc = pdev->soc;
  1824. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
  1825. "pdev=%p, pdev_id=%d, soc=%p vdev=%p\n",
  1826. pdev, pdev_id, soc, vdev);
1827. /* Check if current pdev's monitor_vdev exists */
  1828. if (pdev->monitor_vdev) {
  1829. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1830. "vdev=%p\n", vdev);
  1831. qdf_assert(vdev);
  1832. }
  1833. pdev->monitor_vdev = vdev;
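/*
 * Editorial note: in the HTT RX ring selection config below, enable_fp,
 * enable_md and enable_mo are assumed to select the "filter pass",
 * "monitor direct" and "monitor other" packet classes (per HTT naming).
 * The first filter block requests per-packet TLVs plus the packet itself
 * for the monitor destination ring; the second block adds the PPDU-level
 * TLVs intended for the status ring, whose ring config call is currently
 * commented out.
 */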
  1834. htt_tlv_filter.mpdu_start = 1;
  1835. htt_tlv_filter.msdu_start = 1;
  1836. htt_tlv_filter.packet = 1;
  1837. htt_tlv_filter.msdu_end = 1;
  1838. htt_tlv_filter.mpdu_end = 1;
  1839. htt_tlv_filter.packet_header = 1;
  1840. htt_tlv_filter.attention = 1;
  1841. htt_tlv_filter.ppdu_start = 0;
  1842. htt_tlv_filter.ppdu_end = 0;
  1843. htt_tlv_filter.ppdu_end_user_stats = 0;
  1844. htt_tlv_filter.ppdu_end_user_stats_ext = 0;
  1845. htt_tlv_filter.ppdu_end_status_done = 0;
  1846. htt_tlv_filter.enable_fp = 1;
  1847. htt_tlv_filter.enable_md = 0;
  1848. htt_tlv_filter.enable_mo = 1;
  1849. htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  1850. pdev->rxdma_mon_dst_ring.hal_srng,
  1851. RXDMA_MONITOR_BUF, RX_BUFFER_SIZE, &htt_tlv_filter);
  1852. htt_tlv_filter.mpdu_start = 1;
  1853. htt_tlv_filter.msdu_start = 1;
  1854. htt_tlv_filter.packet = 0;
  1855. htt_tlv_filter.msdu_end = 1;
  1856. htt_tlv_filter.mpdu_end = 1;
  1857. htt_tlv_filter.packet_header = 1;
  1858. htt_tlv_filter.attention = 1;
  1859. htt_tlv_filter.ppdu_start = 1;
  1860. htt_tlv_filter.ppdu_end = 1;
  1861. htt_tlv_filter.ppdu_end_user_stats = 1;
  1862. htt_tlv_filter.ppdu_end_user_stats_ext = 1;
  1863. htt_tlv_filter.ppdu_end_status_done = 1;
  1864. htt_tlv_filter.enable_fp = 1;
  1865. htt_tlv_filter.enable_md = 1;
  1866. htt_tlv_filter.enable_mo = 1;
  1867. /*
  1868. * htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id,
  1869. * pdev->rxdma_mon_status_ring.hal_srng,
  1870. * RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
  1871. */
  1872. return QDF_STATUS_SUCCESS;
  1873. }
  1874. #ifdef MESH_MODE_SUPPORT
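/*
 * dp_peer_set_mesh_mode() - flag the vdev as a mesh vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */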
  1875. void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
  1876. {
  1877. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  1878. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1879. FL("val %d"), val);
  1880. vdev->mesh_vdev = val;
  1881. }
  1882. /*
  1883. * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
  1884. * @vdev_hdl: virtual device object
  1885. * @val: value to be set
  1886. *
  1887. * Return: void
  1888. */
  1889. void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
  1890. {
  1891. struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
  1892. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  1893. FL("val %d"), val);
  1894. vdev->mesh_rx_filter = val;
  1895. }
  1896. #endif
  1897. /**
  1898. * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
  1899. * @vdev: DP VDEV handle
  1900. *
  1901. * return: void
  1902. */
  1903. void dp_aggregate_vdev_stats(struct dp_vdev *vdev)
  1904. {
  1905. struct dp_peer *peer = NULL;
  1906. int i;
  1907. qdf_mem_set(&(vdev->stats.tx), sizeof(vdev->stats.tx), 0x0);
  1908. qdf_mem_set(&(vdev->stats.rx), sizeof(vdev->stats.rx), 0x0);
  1909. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  1910. if (!peer)
  1911. return;
  1912. for (i = 0; i <= MAX_MCS; i++) {
  1913. DP_STATS_AGGR(vdev, peer, tx.pkt_type[0].mcs_count[i]);
  1914. DP_STATS_AGGR(vdev, peer, tx.pkt_type[1].mcs_count[i]);
  1915. DP_STATS_AGGR(vdev, peer, tx.pkt_type[2].mcs_count[i]);
  1916. DP_STATS_AGGR(vdev, peer, tx.pkt_type[3].mcs_count[i]);
  1917. DP_STATS_AGGR(vdev, peer, tx.pkt_type[4].mcs_count[i]);
  1918. DP_STATS_AGGR(vdev, peer, rx.mcs_count[i]);
  1919. }
  1920. for (i = 0; i < SUPPORTED_BW; i++) {
  1921. DP_STATS_AGGR(vdev, peer, tx.bw[i]);
  1922. DP_STATS_AGGR(vdev, peer, rx.bw[i]);
  1923. }
  1924. for (i = 0; i < SS_COUNT; i++)
  1925. DP_STATS_AGGR(vdev, peer, rx.nss[i]);
  1926. for (i = 0; i < WME_AC_MAX; i++) {
  1927. DP_STATS_AGGR(vdev, peer, tx.wme_ac_type[i]);
  1928. DP_STATS_AGGR(vdev, peer, rx.wme_ac_type[i]);
  1929. DP_STATS_AGGR(vdev, peer, tx.excess_retries_ac[i]);
  1930. }
  1931. for (i = 0; i < MAX_MCS + 1; i++) {
  1932. DP_STATS_AGGR(vdev, peer, tx.sgi_count[i]);
  1933. DP_STATS_AGGR(vdev, peer, rx.sgi_count[i]);
  1934. }
  1935. DP_STATS_AGGR_PKT(vdev, peer, tx.comp_pkt);
  1936. DP_STATS_AGGR_PKT(vdev, peer, tx.ucast);
  1937. DP_STATS_AGGR_PKT(vdev, peer, tx.mcast);
  1938. DP_STATS_AGGR_PKT(vdev, peer, tx.tx_success);
  1939. DP_STATS_AGGR(vdev, peer, tx.tx_failed);
  1940. DP_STATS_AGGR(vdev, peer, tx.ofdma);
  1941. DP_STATS_AGGR(vdev, peer, tx.stbc);
  1942. DP_STATS_AGGR(vdev, peer, tx.ldpc);
  1943. DP_STATS_AGGR(vdev, peer, tx.retries);
  1944. DP_STATS_AGGR(vdev, peer, tx.non_amsdu_cnt);
  1945. DP_STATS_AGGR(vdev, peer, tx.amsdu_cnt);
  1946. DP_STATS_AGGR(vdev, peer, tx.dropped.dma_map_error);
  1947. DP_STATS_AGGR(vdev, peer, tx.dropped.ring_full);
  1948. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard);
  1949. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_retired);
  1950. DP_STATS_AGGR(vdev, peer, tx.dropped.mpdu_age_out);
  1951. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason1);
  1952. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason2);
  1953. DP_STATS_AGGR(vdev, peer, tx.dropped.fw_discard_reason3);
  1954. DP_STATS_AGGR(vdev, peer, rx.err.mic_err);
  1955. DP_STATS_AGGR(vdev, peer, rx.err.decrypt_err);
  1956. DP_STATS_AGGR(vdev, peer, rx.non_ampdu_cnt);
  1957. DP_STATS_AGGR(vdev, peer, rx.ampdu_cnt);
  1958. DP_STATS_AGGR(vdev, peer, rx.non_amsdu_cnt);
  1959. DP_STATS_AGGR(vdev, peer, rx.amsdu_cnt);
  1960. DP_STATS_AGGR_PKT(vdev, peer, rx.to_stack);
  1961. DP_STATS_AGGR_PKT(vdev, peer, rx.rcvd_reo);
  1962. DP_STATS_AGGR_PKT(vdev, peer, rx.unicast);
  1963. DP_STATS_AGGR_PKT(vdev, peer, rx.multicast);
  1964. DP_STATS_AGGR_PKT(vdev, peer, rx.wds);
  1965. DP_STATS_AGGR_PKT(vdev, peer, rx.raw);
  1966. DP_STATS_AGGR_PKT(vdev, peer, rx.intra_bss);
  1967. vdev->stats.tx.last_ack_rssi =
  1968. peer->stats.tx.last_ack_rssi;
  1969. }
  1970. }
  1971. /**
  1972. * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
  1973. * @pdev: DP PDEV handle
  1974. *
  1975. * return: void
  1976. */
  1977. static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
  1978. {
  1979. struct dp_vdev *vdev = NULL;
  1980. uint8_t i;
  1981. qdf_mem_set(&(pdev->stats.tx), sizeof(pdev->stats.tx), 0x0);
  1982. qdf_mem_set(&(pdev->stats.rx), sizeof(pdev->stats.rx), 0x0);
  1983. qdf_mem_set(&(pdev->stats.tx_i), sizeof(pdev->stats.tx_i), 0x0);
  1984. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1985. if (!vdev)
  1986. return;
  1987. dp_aggregate_vdev_stats(vdev);
  1988. for (i = 0; i <= MAX_MCS; i++) {
  1989. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[0].mcs_count[i]);
  1990. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[1].mcs_count[i]);
  1991. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[2].mcs_count[i]);
  1992. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[3].mcs_count[i]);
  1993. DP_STATS_AGGR(pdev, vdev, tx.pkt_type[4].mcs_count[i]);
  1994. DP_STATS_AGGR(pdev, vdev, rx.mcs_count[i]);
  1995. }
  1996. for (i = 0; i < SUPPORTED_BW; i++) {
  1997. DP_STATS_AGGR(pdev, vdev, tx.bw[i]);
  1998. DP_STATS_AGGR(pdev, vdev, rx.bw[i]);
  1999. }
  2000. for (i = 0; i < SS_COUNT; i++)
  2001. DP_STATS_AGGR(pdev, vdev, rx.nss[i]);
  2002. for (i = 0; i < WME_AC_MAX; i++) {
  2003. DP_STATS_AGGR(pdev, vdev, tx.wme_ac_type[i]);
  2004. DP_STATS_AGGR(pdev, vdev, rx.wme_ac_type[i]);
  2005. DP_STATS_AGGR(pdev, vdev,
  2006. tx.excess_retries_ac[i]);
  2007. }
  2008. for (i = 0; i < MAX_MCS + 1; i++) {
  2009. DP_STATS_AGGR(pdev, vdev, tx.sgi_count[i]);
  2010. DP_STATS_AGGR(pdev, vdev, rx.sgi_count[i]);
  2011. }
  2012. DP_STATS_AGGR_PKT(pdev, vdev, tx.comp_pkt);
  2013. DP_STATS_AGGR_PKT(pdev, vdev, tx.ucast);
  2014. DP_STATS_AGGR_PKT(pdev, vdev, tx.mcast);
  2015. DP_STATS_AGGR_PKT(pdev, vdev, tx.tx_success);
  2016. DP_STATS_AGGR(pdev, vdev, tx.tx_failed);
  2017. DP_STATS_AGGR(pdev, vdev, tx.ofdma);
  2018. DP_STATS_AGGR(pdev, vdev, tx.stbc);
  2019. DP_STATS_AGGR(pdev, vdev, tx.ldpc);
  2020. DP_STATS_AGGR(pdev, vdev, tx.retries);
  2021. DP_STATS_AGGR(pdev, vdev, tx.non_amsdu_cnt);
  2022. DP_STATS_AGGR(pdev, vdev, tx.amsdu_cnt);
  2023. DP_STATS_AGGR(pdev, vdev, tx.dropped.dma_map_error);
  2024. DP_STATS_AGGR(pdev, vdev, tx.dropped.ring_full);
  2025. DP_STATS_AGGR(pdev, vdev, tx.dropped.fw_discard);
  2026. DP_STATS_AGGR(pdev, vdev,
  2027. tx.dropped.fw_discard_retired);
  2028. DP_STATS_AGGR(pdev, vdev, tx.dropped.mpdu_age_out);
  2029. DP_STATS_AGGR(pdev, vdev,
  2030. tx.dropped.fw_discard_reason1);
  2031. DP_STATS_AGGR(pdev, vdev,
  2032. tx.dropped.fw_discard_reason2);
  2033. DP_STATS_AGGR(pdev, vdev,
  2034. tx.dropped.fw_discard_reason3);
  2035. DP_STATS_AGGR(pdev, vdev, rx.err.mic_err);
  2036. DP_STATS_AGGR(pdev, vdev, rx.err.decrypt_err);
  2037. DP_STATS_AGGR(pdev, vdev, rx.non_ampdu_cnt);
  2038. DP_STATS_AGGR(pdev, vdev, rx.ampdu_cnt);
  2039. DP_STATS_AGGR(pdev, vdev, rx.non_amsdu_cnt);
  2040. DP_STATS_AGGR(pdev, vdev, rx.amsdu_cnt);
  2041. DP_STATS_AGGR_PKT(pdev, vdev, rx.to_stack);
  2042. DP_STATS_AGGR_PKT(pdev, vdev, rx.rcvd_reo);
  2043. DP_STATS_AGGR_PKT(pdev, vdev, rx.unicast);
  2044. DP_STATS_AGGR_PKT(pdev, vdev, rx.multicast);
  2045. DP_STATS_AGGR_PKT(pdev, vdev, rx.wds);
  2046. DP_STATS_AGGR_PKT(pdev, vdev, rx.intra_bss);
  2047. DP_STATS_AGGR_PKT(pdev, vdev, rx.raw);
  2048. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.rcvd);
  2049. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.freed);
  2050. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.processed);
  2051. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.reinject_pkts);
  2052. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.inspect_pkts);
  2053. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.raw_pkt);
  2054. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.tso.tso_pkt);
  2055. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_host);
  2056. DP_STATS_AGGR(pdev, vdev, tx_i.tso.dropped_target);
  2057. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_host);
  2058. DP_STATS_AGGR(pdev, vdev, tx_i.sg.dropped_target);
  2059. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.sg.sg_pkt);
  2060. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.mcast_en.mcast_pkt);
  2061. DP_STATS_AGGR(pdev, vdev,
  2062. tx_i.mcast_en.dropped_map_error);
  2063. DP_STATS_AGGR(pdev, vdev,
  2064. tx_i.mcast_en.dropped_self_mac);
  2065. DP_STATS_AGGR(pdev, vdev,
  2066. tx_i.mcast_en.dropped_send_fail);
  2067. DP_STATS_AGGR(pdev, vdev, tx_i.mcast_en.ucast);
  2068. DP_STATS_AGGR_PKT(pdev, vdev, tx_i.dropped.dropped_pkt);
  2069. pdev->stats.tx.last_ack_rssi =
  2070. vdev->stats.tx.last_ack_rssi;
  2071. pdev->stats.tx_i.tso.num_seg =
  2072. vdev->stats.tx_i.tso.num_seg;
  2073. }
  2074. }
  2075. /**
  2076. * dp_print_pdev_tx_stats(): Print Pdev level TX stats
  2077. * @pdev: DP_PDEV Handle
  2078. *
  2079. * Return:void
  2080. */
  2081. static inline void
  2082. dp_print_pdev_tx_stats(struct dp_pdev *pdev)
  2083. {
  2084. DP_TRACE(NONE, "WLAN Tx Stats:\n");
  2085. DP_TRACE(NONE, "Received From Stack:\n");
  2086. DP_TRACE(NONE, "Total Packets Received = %d",
  2087. pdev->stats.tx_i.rcvd.num);
  2088. DP_TRACE(NONE, "Bytes Sent = %d",
  2089. pdev->stats.tx_i.rcvd.bytes);
  2090. DP_TRACE(NONE, "Processed:\n");
  2091. DP_TRACE(NONE, "Msdu Processed = %d",
  2092. pdev->stats.tx_i.processed.num);
  2093. DP_TRACE(NONE, "Bytes Processed = %d",
  2094. pdev->stats.tx_i.processed.bytes);
  2095. DP_TRACE(NONE, "Completions:\n");
  2096. DP_TRACE(NONE, "Msdu Sent = %d",
  2097. pdev->stats.tx.comp_pkt.num);
  2098. DP_TRACE(NONE, "Bytes Sent = %d",
  2099. pdev->stats.tx.comp_pkt.bytes);
  2100. DP_TRACE(NONE, "Freed:\n");
  2101. DP_TRACE(NONE, "Msdus Freed = %d",
  2102. pdev->stats.tx_i.freed.num);
  2103. DP_TRACE(NONE, "Bytes Freed = %d",
  2104. pdev->stats.tx_i.freed.bytes);
  2105. DP_TRACE(NONE, "Dropped:\n");
  2106. DP_TRACE(NONE, "Total Packets Dropped = %d",
  2107. pdev->stats.tx_i.dropped.dropped_pkt.num);
  2108. DP_TRACE(NONE, "Bytes Dropped = %d",
  2109. pdev->stats.tx_i.dropped.dropped_pkt.bytes);
  2110. DP_TRACE(NONE, "Dma_map_error = %d",
  2111. pdev->stats.tx.dropped.dma_map_error);
  2112. DP_TRACE(NONE, "Ring Full = %d", pdev->stats.tx.dropped.ring_full);
  2113. DP_TRACE(NONE, "Fw Discard = %d",
  2114. pdev->stats.tx.dropped.fw_discard);
  2115. DP_TRACE(NONE, "Fw Discard Retired = %d",
  2116. pdev->stats.tx.dropped.fw_discard_retired);
  2117. DP_TRACE(NONE, "Firmware Discard Untransmitted = %d",
  2118. pdev->stats.tx.dropped.fw_discard_untransmitted);
  2119. DP_TRACE(NONE, "Mpdu Age Out = %d",
  2120. pdev->stats.tx.dropped.mpdu_age_out);
  2121. DP_TRACE(NONE, "Firmware Discard Reason1 = %d",
  2122. pdev->stats.tx.dropped.fw_discard_reason1);
  2123. DP_TRACE(NONE, "Firmware Discard Reason2 = %d",
  2124. pdev->stats.tx.dropped.fw_discard_reason2);
  2125. DP_TRACE(NONE, "Firmware Discard Reason3 = %d",
  2126. pdev->stats.tx.dropped.fw_discard_reason3);
  2127. DP_TRACE(NONE, "Scatter Gather:\n");
  2128. DP_TRACE(NONE, "Total Packets = %d",
  2129. pdev->stats.tx_i.sg.sg_pkt.num);
  2130. DP_TRACE(NONE, "Total Bytes = %d",
  2131. pdev->stats.tx_i.sg.sg_pkt.bytes);
  2132. DP_TRACE(NONE, "Dropped By Host = %d",
  2133. pdev->stats.tx_i.sg.dropped_host);
  2134. DP_TRACE(NONE, "Dropped By Target = %d",
  2135. pdev->stats.tx_i.sg.dropped_target);
  2136. DP_TRACE(NONE, "Tso:\n");
  2137. DP_TRACE(NONE, "Number of Segments = %d",
  2138. pdev->stats.tx_i.tso.num_seg);
  2139. DP_TRACE(NONE, "Number Packets = %d",
  2140. pdev->stats.tx_i.tso.tso_pkt.num);
  2141. DP_TRACE(NONE, "Total Bytes = %d",
  2142. pdev->stats.tx_i.tso.tso_pkt.bytes);
  2143. DP_TRACE(NONE, "Dropped By Host = %d",
  2144. pdev->stats.tx_i.tso.dropped_host);
  2145. DP_TRACE(NONE, "Mcast Enhancement:\n");
  2146. DP_TRACE(NONE, "Dropped: Map Errors = %d",
  2147. pdev->stats.tx_i.mcast_en.dropped_map_error);
  2148. DP_TRACE(NONE, "Dropped: Self Mac = %d",
  2149. pdev->stats.tx_i.mcast_en.dropped_self_mac);
  2150. DP_TRACE(NONE, "Dropped: Send Fail = %d",
  2151. pdev->stats.tx_i.mcast_en.dropped_send_fail);
  2152. DP_TRACE(NONE, "Total Unicast sent = %d",
  2153. pdev->stats.tx_i.mcast_en.ucast);
  2154. }
  2155. /**
  2156. * dp_print_pdev_rx_stats(): Print Pdev level RX stats
  2157. * @pdev: DP_PDEV Handle
  2158. *
  2159. * Return: void
  2160. */
  2161. static inline void
  2162. dp_print_pdev_rx_stats(struct dp_pdev *pdev)
  2163. {
  2164. DP_TRACE(NONE, "WLAN Rx Stats:\n");
  2165. DP_TRACE(NONE, "Received From HW (Reo Dest Ring):\n");
  2166. DP_TRACE(NONE, "Total Packets Received = %d",
  2167. pdev->stats.rx.rcvd_reo.num);
  2168. DP_TRACE(NONE, "Bytes Sent = %d",
  2169. pdev->stats.rx.rcvd_reo.bytes);
  2170. DP_TRACE(NONE, "Replenished:\n");
  2171. DP_TRACE(NONE, "Total Packets Replenished = %d",
  2172. pdev->stats.replenished.num);
  2173. DP_TRACE(NONE, "Bytes Sent = %d",
  2174. pdev->stats.replenished.bytes);
  2175. DP_TRACE(NONE, "Buffers Added To Freelist = %d",
  2176. pdev->stats.buf_freelist);
  2177. DP_TRACE(NONE, "Dropped:\n");
  2178. DP_TRACE(NONE, "Total Packets With Msdu Not Done = %d",
  2179. pdev->stats.dropped.msdu_not_done.num);
  2180. DP_TRACE(NONE, "Bytes Sent With Msdu Not Done = %d",
  2181. pdev->stats.dropped.msdu_not_done.bytes);
  2182. DP_TRACE(NONE, "Sent To Stack:\n");
  2183. DP_TRACE(NONE, "Packets Sent To Stack = %d",
  2184. pdev->stats.rx.to_stack.num);
  2185. DP_TRACE(NONE, "Bytes Sent To Stack = %d",
  2186. pdev->stats.rx.to_stack.bytes);
  2187. DP_TRACE(NONE, "Errors:\n");
  2188. DP_TRACE(NONE, "Rxdma Ring Unititalized: %d",
  2189. pdev->stats.err.rxdma_unitialized);
  2190. DP_TRACE(NONE, "Desc Alloc Failed: %d",
  2191. pdev->stats.err.desc_alloc_fail);
  2192. }
  2193. /**
  2194. * dp_print_soc_tx_stats(): Print SOC level stats
  2195. * @soc DP_SOC Handle
  2196. *
  2197. * Return: void
  2198. */
  2199. static inline void
  2200. dp_print_soc_tx_stats(struct dp_soc *soc)
  2201. {
  2202. DP_TRACE(NONE, "SOC Tx Stats:\n");
  2203. DP_TRACE(NONE, "Tx Descriptors In Use = %d",
  2204. soc->stats.tx.desc_in_use);
  2205. DP_TRACE(NONE, "Total Packets With No Peer = %d",
  2206. soc->stats.tx.tx_invalid_peer.num);
  2207. DP_TRACE(NONE, "Bytes Sent With No Peer = %d",
  2208. soc->stats.tx.tx_invalid_peer.bytes);
  2209. }
  2210. /**
  2211. * dp_print_soc_rx_stats: Print SOC level Rx stats
  2212. * @soc: DP_SOC Handle
  2213. *
  2214. * Return:void
  2215. */
  2216. static inline void
  2217. dp_print_soc_rx_stats(struct dp_soc *soc)
  2218. {
  2219. uint32_t i;
  2220. char reo_error[DP_REO_ERR_LENGTH];
  2221. char rxdma_error[DP_RXDMA_ERR_LENGTH];
  2222. uint8_t index = 0;
  2223. DP_TRACE(NONE, "SOC Rx Stats:\n");
  2224. DP_TRACE(NONE, "Errors:\n");
  2225. DP_TRACE(NONE, "Invalid RBM = %d",
  2226. soc->stats.rx.err.invalid_rbm);
  2227. DP_TRACE(NONE, "Invalid Vdev = %d",
  2228. soc->stats.rx.err.invalid_vdev);
  2229. DP_TRACE(NONE, "Invalid Pdev = %d",
  2230. soc->stats.rx.err.invalid_pdev);
  2231. DP_TRACE(NONE, "Invalid Peer = %d",
  2232. soc->stats.rx.err.rx_invalid_peer.num);
  2233. DP_TRACE(NONE, "HAL Ring Access Fail = %d",
  2234. soc->stats.rx.err.hal_ring_access_fail);
  2235. for (i = 0; i < MAX_RXDMA_ERRORS; i++) {
  2236. index += qdf_snprint(&rxdma_error[index],
  2237. DP_RXDMA_ERR_LENGTH - index,
  2238. " %d,", soc->stats.rx.err.rxdma_error[i]);
  2239. }
  2240. DP_TRACE(NONE, "RXDMA Error (0-31):%s",
  2241. rxdma_error);
  2242. index = 0;
  2243. for (i = 0; i < REO_ERROR_TYPE_MAX; i++) {
  2244. index += qdf_snprint(&reo_error[index],
  2245. DP_REO_ERR_LENGTH - index,
  2246. " %d,", soc->stats.rx.err.reo_error[i]);
  2247. }
  2248. DP_TRACE(NONE, "REO Error(0-14):%s",
  2249. reo_error);
  2250. }
  2251. /**
  2252. * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
  2253. * @vdev: DP_VDEV handle
  2254. *
  2255. * Return:void
  2256. */
  2257. static inline void
  2258. dp_txrx_host_stats_clr(struct dp_vdev *vdev)
  2259. {
  2260. struct dp_peer *peer = NULL;
  2261. DP_STATS_CLR(vdev->pdev);
  2262. DP_STATS_CLR(vdev->pdev->soc);
  2263. DP_STATS_CLR(vdev);
  2264. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2265. if (!peer)
  2266. return;
  2267. DP_STATS_CLR(peer);
  2268. }
  2269. }
  2270. /**
  2271. * dp_print_rx_rates(): Print Rx rate stats
  2272. * @vdev: DP_VDEV handle
  2273. *
  2274. * Return:void
  2275. */
  2276. static inline void
  2277. dp_print_rx_rates(struct dp_vdev *vdev)
  2278. {
  2279. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  2280. uint8_t i;
  2281. uint8_t index = 0;
  2282. char mcs[DP_MCS_LENGTH];
  2283. char nss[DP_NSS_LENGTH];
  2284. DP_TRACE(NONE, "Rx Rate Info:\n");
  2285. for (i = 0; i < MAX_MCS; i++) {
  2286. index += qdf_snprint(&mcs[index], DP_MCS_LENGTH - index,
  2287. " %d,", pdev->stats.rx.mcs_count[i]);
  2288. }
  2289. DP_TRACE(NONE, "MCS(0-11):%s",
  2290. mcs);
  2291. index = 0;
  2292. for (i = 0; i < SS_COUNT; i++) {
  2293. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  2294. " %d,", pdev->stats.rx.nss[i]);
  2295. }
  2296. DP_TRACE(NONE, "NSS(0-7):%s",
  2297. nss);
  2298. DP_TRACE(NONE, "SGI:"
  2299. " 0.8us %d,"
  2300. " 0.4us %d,"
  2301. " 1.6us %d,"
  2302. " 3.2us %d,",
  2303. pdev->stats.rx.sgi_count[0],
  2304. pdev->stats.rx.sgi_count[1],
  2305. pdev->stats.rx.sgi_count[2],
  2306. pdev->stats.rx.sgi_count[3]);
  2307. DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  2308. pdev->stats.rx.bw[0], pdev->stats.rx.bw[1],
  2309. pdev->stats.rx.bw[2], pdev->stats.rx.bw[3]);
  2310. DP_TRACE(NONE, "Reception Type:"
  2311. " SU: %d,"
  2312. " MU_MIMO:%d,"
  2313. " MU_OFDMA:%d,"
  2314. " MU_OFDMA_MIMO:%d",
  2315. pdev->stats.rx.reception_type[0],
  2316. pdev->stats.rx.reception_type[1],
  2317. pdev->stats.rx.reception_type[2],
  2318. pdev->stats.rx.reception_type[3]);
  2319. DP_TRACE(NONE, "Aggregation:\n");
  2320. DP_TRACE(NONE, "Number of Msdu's Part of Ampdus = %d",
  2321. pdev->stats.rx.ampdu_cnt);
  2322. DP_TRACE(NONE, "Number of Msdu's With No Mpdu Level Aggregation : %d",
  2323. pdev->stats.rx.non_ampdu_cnt);
  2324. DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
  2325. pdev->stats.rx.amsdu_cnt);
  2326. DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
  2327. pdev->stats.rx.non_amsdu_cnt);
  2328. }
  2329. /**
  2330. * dp_print_tx_rates(): Print tx rates
  2331. * @vdev: DP_VDEV handle
  2332. *
  2333. * Return:void
  2334. */
  2335. static inline void
  2336. dp_print_tx_rates(struct dp_vdev *vdev)
  2337. {
  2338. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  2339. uint8_t i, pkt_type;
  2340. char mcs[DOT11_MAX][DP_MCS_LENGTH];
  2341. uint32_t index;
  2342. DP_TRACE(NONE, "Tx Rate Info:\n");
  2343. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  2344. index = 0;
  2345. for (i = 0; i < MAX_MCS; i++) {
  2346. index += qdf_snprint(&mcs[pkt_type][index],
  2347. DP_MCS_LENGTH - index,
  2348. " %d ",
  2349. pdev->stats.tx.pkt_type[pkt_type].
  2350. mcs_count[i]);
  2351. }
  2352. }
  2353. DP_TRACE(NONE, "Packet Type 11A MCS(0-7):%s",
  2354. mcs[0]);
  2355. DP_TRACE(NONE, "Packet Type 11A MCS Invalid = %d",
  2356. pdev->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
  2357. DP_TRACE(NONE, "Packet Type 11B MCS(0-6):%s",
  2358. mcs[1]);
  2359. DP_TRACE(NONE, "Packet Type 11B MCS Invalid = %d",
  2360. pdev->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
  2361. DP_TRACE(NONE, "Packet Type 11N MCS(0-7):%s",
  2362. mcs[2]);
  2363. DP_TRACE(NONE, "Packet Type 11N MCS Invalid = %d",
  2364. pdev->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
  2365. DP_TRACE(NONE, "Packet Type 11AC MCS(0-9):%s",
  2366. mcs[3]);
  2367. DP_TRACE(NONE, "Packet Type 11AC MCS Invalid = %d",
  2368. pdev->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
  2369. DP_TRACE(NONE, "Packet Type 11AX MCS(0-11):%s",
  2370. mcs[4]);
  2371. DP_TRACE(NONE, "Packet Type 11AX MCS Invalid = %d",
  2372. pdev->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
  2373. DP_TRACE(NONE, "SGI:"
  2374. " 0.8us %d,"
  2375. " 0.4us %d,"
  2376. " 1.6us %d,"
  2377. " 3.2us %d,",
  2378. pdev->stats.tx.sgi_count[0],
  2379. pdev->stats.tx.sgi_count[1],
  2380. pdev->stats.tx.sgi_count[2],
  2381. pdev->stats.tx.sgi_count[3]);
  2382. DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  2383. pdev->stats.tx.bw[0], pdev->stats.tx.bw[1],
  2384. pdev->stats.tx.bw[2], pdev->stats.tx.bw[3]);
  2385. DP_TRACE(NONE, "Aggregation:\n");
  2386. DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
  2387. pdev->stats.tx.amsdu_cnt);
  2388. DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
  2389. pdev->stats.tx.non_amsdu_cnt);
  2390. }
  2391. /**
  2392. * dp_print_peer_stats():print peer stats
  2393. * @peer: DP_PEER handle
  2394. *
  2395. * return void
  2396. */
  2397. static inline void dp_print_peer_stats(struct dp_peer *peer)
  2398. {
  2399. uint8_t i, pkt_type;
  2400. char mcs[DOT11_MAX][DP_MCS_LENGTH];
  2401. uint32_t index;
  2402. char nss[DP_NSS_LENGTH];
  2403. char mcs_rx[DP_MCS_LENGTH];
  2404. DP_TRACE(NONE, "Node Tx Stats:\n");
  2405. DP_TRACE(NONE, "Total Packet Completions %d",
  2406. peer->stats.tx.comp_pkt.num);
  2407. DP_TRACE(NONE, "Total Bytes Completions %d",
  2408. peer->stats.tx.comp_pkt.bytes);
  2409. DP_TRACE(NONE, "Success Packets %d",
  2410. peer->stats.tx.tx_success.num);
  2411. DP_TRACE(NONE, "Success Bytes %d",
  2412. peer->stats.tx.tx_success.bytes);
  2413. DP_TRACE(NONE, "Packets Failed %d",
  2414. peer->stats.tx.tx_failed);
  2415. DP_TRACE(NONE, "Packets In OFDMA %d",
  2416. peer->stats.tx.ofdma);
  2417. DP_TRACE(NONE, "Packets In STBC %d",
  2418. peer->stats.tx.stbc);
  2419. DP_TRACE(NONE, "Packets In LDPC %d",
  2420. peer->stats.tx.ldpc);
  2421. DP_TRACE(NONE, "Packet Retries %d",
  2422. peer->stats.tx.retries);
  2423. DP_TRACE(NONE, "Msdu's Not Part of Ampdu %d",
  2424. peer->stats.tx.non_amsdu_cnt);
  2425. DP_TRACE(NONE, "Mpdu's Part of Ampdu %d",
  2426. peer->stats.tx.amsdu_cnt);
  2427. DP_TRACE(NONE, "Last Packet RSSI %d",
  2428. peer->stats.tx.last_ack_rssi);
  2429. DP_TRACE(NONE, "Dropped At Host: Due To DMA Map Error %d",
  2430. peer->stats.tx.dropped.dma_map_error);
  2431. DP_TRACE(NONE, "Dropped At Host: Due To Ring Full %d",
  2432. peer->stats.tx.dropped.ring_full);
  2433. DP_TRACE(NONE, "Dropped At FW: FW Discard %d",
  2434. peer->stats.tx.dropped.fw_discard);
  2435. DP_TRACE(NONE, "Dropped At FW: FW Discard Retired %d",
  2436. peer->stats.tx.dropped.fw_discard_retired);
  2437. DP_TRACE(NONE, "Dropped At FW: FW Discard Untransmitted %d",
  2438. peer->stats.tx.dropped.fw_discard_untransmitted);
  2439. DP_TRACE(NONE, "Dropped : Mpdu Age Out %d",
  2440. peer->stats.tx.dropped.mpdu_age_out);
  2441. DP_TRACE(NONE, "Dropped : FW Discard Reason1 %d",
  2442. peer->stats.tx.dropped.fw_discard_reason1);
  2443. DP_TRACE(NONE, "Dropped : FW Discard Reason2 %d",
  2444. peer->stats.tx.dropped.fw_discard_reason2);
  2445. DP_TRACE(NONE, "Dropped : FW Discard Reason3 %d",
  2446. peer->stats.tx.dropped.fw_discard_reason3);
  2447. for (pkt_type = 0; pkt_type < DOT11_MAX; pkt_type++) {
  2448. index = 0;
  2449. for (i = 0; i < MAX_MCS; i++) {
  2450. index += qdf_snprint(&mcs[pkt_type][index],
  2451. DP_MCS_LENGTH - index,
  2452. " %d ",
  2453. peer->stats.tx.pkt_type[pkt_type].
  2454. mcs_count[i]);
  2455. }
  2456. }
  2457. DP_TRACE(NONE, "Packet Type 11A MCS(0-7):%s",
  2458. mcs[0]);
  2459. DP_TRACE(NONE, "Packet Type 11A MCS Invalid = %d",
  2460. peer->stats.tx.pkt_type[DOT11_A].mcs_count[MAX_MCS]);
  2461. DP_TRACE(NONE, "Packet Type 11B MCS(0-6):%s",
  2462. mcs[1]);
  2463. DP_TRACE(NONE, "Packet Type 11B MCS Invalid = %d",
  2464. peer->stats.tx.pkt_type[DOT11_B].mcs_count[MAX_MCS]);
  2465. DP_TRACE(NONE, "Packet Type 11N MCS(0-7):%s",
  2466. mcs[2]);
  2467. DP_TRACE(NONE, "Packet Type 11N MCS Invalid = %d",
  2468. peer->stats.tx.pkt_type[DOT11_N].mcs_count[MAX_MCS]);
  2469. DP_TRACE(NONE, "Packet Type 11AC MCS(0-9):%s",
  2470. mcs[3]);
  2471. DP_TRACE(NONE, "Packet Type 11AC MCS Invalid = %d",
  2472. peer->stats.tx.pkt_type[DOT11_AC].mcs_count[MAX_MCS]);
  2473. DP_TRACE(NONE, "Packet Type 11AX MCS(0-11):%s",
  2474. mcs[4]);
  2475. DP_TRACE(NONE, "Packet Type 11AX MCS Invalid = %d",
  2476. peer->stats.tx.pkt_type[DOT11_AX].mcs_count[MAX_MCS]);
  2477. DP_TRACE(NONE, "SGI:"
  2478. " 0.8us %d,"
  2479. " 0.4us %d,"
  2480. " 1.6us %d,"
  2481. " 3.2us %d,",
  2482. peer->stats.tx.sgi_count[0],
  2483. peer->stats.tx.sgi_count[1],
  2484. peer->stats.tx.sgi_count[2],
  2485. peer->stats.tx.sgi_count[3]);
  2486. DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  2487. peer->stats.tx.bw[0], peer->stats.tx.bw[1],
  2488. peer->stats.tx.bw[2], peer->stats.tx.bw[3]);
  2489. DP_TRACE(NONE, "Aggregation:\n");
  2490. DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
  2491. peer->stats.tx.amsdu_cnt);
  2492. DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
  2493. peer->stats.tx.non_amsdu_cnt);
  2494. DP_TRACE(NONE, "Node Rx Stats:\n");
  2495. DP_TRACE(NONE, "Packets Sent To Stack %d",
  2496. peer->stats.rx.to_stack.num);
  2497. DP_TRACE(NONE, "Bytes Sent To Stack %d",
  2498. peer->stats.rx.to_stack.bytes);
  2499. DP_TRACE(NONE, "Packets Received %d", peer->stats.rx.rcvd_reo.num);
  2500. DP_TRACE(NONE, "Bytes Received %d", peer->stats.rx.rcvd_reo.bytes);
  2501. DP_TRACE(NONE, "Unicast Packets Received %d",
  2502. peer->stats.rx.unicast.num);
  2503. DP_TRACE(NONE, "Unicast Bytes Received %d",
  2504. peer->stats.rx.unicast.bytes);
  2505. DP_TRACE(NONE, "Multicast Packets Received %d",
  2506. peer->stats.rx.multicast.num);
  2507. DP_TRACE(NONE, "Multicast Bytes Received %d",
  2508. peer->stats.rx.multicast.bytes);
  2509. DP_TRACE(NONE, "WDS Packets Received %d",
  2510. peer->stats.rx.wds.num);
  2511. DP_TRACE(NONE, "WDS Bytes Received %d",
  2512. peer->stats.rx.wds.bytes);
  2513. DP_TRACE(NONE, "Intra BSS Packets Received %d",
  2514. peer->stats.rx.intra_bss.num);
  2515. DP_TRACE(NONE, "Intra BSS Bytes Received %d",
  2516. peer->stats.rx.intra_bss.bytes);
  2517. DP_TRACE(NONE, "Raw Packets Received %d",
  2518. peer->stats.rx.raw.num);
  2519. DP_TRACE(NONE, "Raw Bytes Received %d",
  2520. peer->stats.rx.raw.bytes);
  2521. DP_TRACE(NONE, "Errors: MIC Errors %d",
  2522. peer->stats.rx.err.mic_err);
  2523. DP_TRACE(NONE, "Erros: Decryption Errors %d",
  2524. peer->stats.rx.err.decrypt_err);
  2525. DP_TRACE(NONE, "Msdu's Received As Part of Ampdu %d",
  2526. peer->stats.rx.non_ampdu_cnt);
  2527. DP_TRACE(NONE, "Msdu's Recived As Ampdu %d", peer->stats.rx.ampdu_cnt);
  2528. DP_TRACE(NONE, "Msdu's Received Not Part of Amsdu's %d",
  2529. peer->stats.rx.non_amsdu_cnt);
  2530. DP_TRACE(NONE, "MSDUs Received As Part of Amsdu %d",
  2531. peer->stats.rx.amsdu_cnt);
  2532. DP_TRACE(NONE, "SGI:"
  2533. " 0.8us %d,"
  2534. " 0.4us %d,"
  2535. " 1.6us %d,"
  2536. " 3.2us %d,",
  2537. peer->stats.rx.sgi_count[0],
  2538. peer->stats.rx.sgi_count[1],
  2539. peer->stats.rx.sgi_count[2],
  2540. peer->stats.rx.sgi_count[3]);
  2541. DP_TRACE(NONE, "BW Counts: 20MHZ %d, 40MHZ %d, 80MHZ %d, 160MHZ %d",
  2542. peer->stats.rx.bw[0], peer->stats.rx.bw[1],
  2543. peer->stats.rx.bw[2], peer->stats.rx.bw[3]);
  2544. DP_TRACE(NONE, "Reception Type:"
  2545. " SU %d,"
  2546. " MU_MIMO %d,"
  2547. " MU_OFDMA %d,"
  2548. " MU_OFDMA_MIMO %d",
  2549. peer->stats.rx.reception_type[0],
  2550. peer->stats.rx.reception_type[1],
  2551. peer->stats.rx.reception_type[2],
  2552. peer->stats.rx.reception_type[3]);
  2553. index = 0;
  2554. for (i = 0; i < MAX_MCS; i++) {
  2555. index += qdf_snprint(&mcs_rx[index], DP_MCS_LENGTH - index,
  2556. " %d,", peer->stats.rx.mcs_count[i]);
  2557. }
  2558. DP_TRACE(NONE, "MCS(0-11):%s",
  2559. mcs_rx);
  2560. index = 0;
  2561. for (i = 0; i < SS_COUNT; i++) {
  2562. index += qdf_snprint(&nss[index], DP_NSS_LENGTH - index,
  2563. " %d,", peer->stats.rx.nss[i]);
  2564. }
  2565. DP_TRACE(NONE, "NSS(0-7):%s",
  2566. nss);
  2567. DP_TRACE(NONE, "Aggregation:\n");
  2568. DP_TRACE(NONE, "Number of Msdu's Part of Ampdu = %d",
  2569. peer->stats.rx.ampdu_cnt);
  2570. DP_TRACE(NONE, "Number of Msdu's With No Mpdu Level Aggregation : %d",
  2571. peer->stats.rx.non_ampdu_cnt);
  2572. DP_TRACE(NONE, "Number of Msdu's Part of Amsdu: %d",
  2573. peer->stats.rx.amsdu_cnt);
  2574. DP_TRACE(NONE, "Number of Msdu's With No Msdu Level Aggregation: %d",
  2575. peer->stats.rx.non_amsdu_cnt);
  2576. }
  2577. /**
  2578. * dp_print_host_stats()- Function to print the stats aggregated at host
  2579. * @vdev_handle: DP_VDEV handle
  2580. * @req: ol_txrx_stats_req
  2581. * @type: host stats type
  2582. *
  2583. * Available Stat types
  2584. * TXRX_RX_RATE_STATS: Print Rx Rate Info
  2585. * TXRX_TX_RATE_STATS: Print Tx Rate Info
  2586. * TXRX_TX_HOST_STATS: Print Tx Stats
  2587. * TXRX_RX_HOST_STATS: Print Rx Stats
  2588. * TXRX_CLEAR_STATS : Clear the stats
  2589. *
2590. * Return: 0 on success; prints an error message for unsupported types
  2591. */
  2592. static int
  2593. dp_print_host_stats(struct cdp_vdev *vdev_handle, struct ol_txrx_stats_req *req,
  2594. enum cdp_host_txrx_stats type)
  2595. {
  2596. struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
  2597. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  2598. dp_aggregate_pdev_stats(pdev);
  2599. switch (type) {
  2600. case TXRX_RX_RATE_STATS:
  2601. dp_print_rx_rates(vdev);
  2602. break;
  2603. case TXRX_TX_RATE_STATS:
  2604. dp_print_tx_rates(vdev);
  2605. break;
  2606. case TXRX_TX_HOST_STATS:
  2607. dp_print_pdev_tx_stats(pdev);
  2608. dp_print_soc_tx_stats(pdev->soc);
  2609. break;
  2610. case TXRX_RX_HOST_STATS:
  2611. dp_print_pdev_rx_stats(pdev);
  2612. dp_print_soc_rx_stats(pdev->soc);
  2613. break;
  2614. case TXRX_CLEAR_STATS:
  2615. dp_txrx_host_stats_clr(vdev);
  2616. break;
  2617. default:
  2618. DP_TRACE(NONE, "Wrong Input For TxRx Host Stats");
  2619. break;
  2620. }
  2621. return 0;
  2622. }

/*
 * dp_get_peer_stats() - function to print peer stats
 * @pdev_handle: DP_PDEV handle
 * @mac_addr: mac address of the peer
 *
 * Return: void
 */
static void
dp_get_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
{
	struct dp_peer *peer;
	uint8_t local_id;

	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
			&local_id);

	/* Guard against an unknown MAC address: dp_find_peer_by_addr can
	 * return NULL, so do not dereference the peer in that case.
	 */
	if (!peer)
		return;

	dp_print_peer_stats(peer);
}

/*
 * dp_set_vdev_param() - function to set parameters in vdev
 * @vdev_handle: DP_VDEV handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: void
 */
static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
		enum cdp_vdev_param_type param, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	switch (param) {
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val;
		break;
	default:
		break;
	}
}

/**
 * dp_peer_set_nawds() - set nawds bit in peer
 * @peer_handle: pointer to peer
 * @value: enable/disable nawds
 *
 * Return: void
 */
static void dp_peer_set_nawds(void *peer_handle, uint8_t value)
{
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	peer->nawds_enabled = value;
}

/*
 * dp_set_vdev_dscp_tid_map_wifi3() - Update map ID selected for particular vdev
 * @vdev_handle: DP_VDEV handle
 * @map_id: ID of map that needs to be updated
 *
 * Return: void
 */
static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
		uint8_t map_id)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->dscp_tid_map_id = map_id;
}

/**
 * dp_set_pdev_dscp_tid_map_wifi3() - update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map
 * @tid: tid value passed by the user
 *
 * Return: void
 */
static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
		uint8_t map_id, uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	pdev->dscp_tid_map[map_id][dscp] = tid;
	hal_tx_update_dscp_tid(pdev->soc->hal_soc, tid, map_id, dscp);
}
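
/*
 * Worked example (assuming DP_IP_DSCP_SHIFT == 2 and DP_IP_DSCP_MASK == 0x3f,
 * i.e. DSCP is the upper six bits of the IP ToS byte): mapping Expedited
 * Forwarding traffic to TID 6 on map 0 would pass tos = 0xb8, which yields
 * dscp = (0xb8 >> 2) & 0x3f = 46, so dscp_tid_map[0][46] = 6 is stored and
 * the same mapping is pushed to hardware via hal_tx_update_dscp_tid().
 *
 *	dp_set_pdev_dscp_tid_map_wifi3(pdev_handle, 0, 0xb8, 6);
 */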

/*
 * dp_txrx_stats() - function to map to firmware and host stats
 * @vdev: virtual device handle
 * @req: statistics request handle
 * @stats: type of statistics requested
 *
 * Return: 0 on success
 */
static int dp_txrx_stats(struct cdp_vdev *vdev,
		struct ol_txrx_stats_req *req, enum cdp_stats stats)
{
	int host_stats;
	int fw_stats;

	if (stats >= CDP_TXRX_MAX_STATS)
		return 0;

	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "stats: %u fw_stats_type: %d host_stats_type: %d",
		  stats, fw_stats, host_stats);

	/* TODO: Firmware stats mapping is not implemented yet */
	if (host_stats != TXRX_HOST_STATS_INVALID)
		return dp_print_host_stats(vdev, req, host_stats);

	return 0;
}
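
/*
 * Dispatch sketch (illustrative): dp_txrx_stats() is the single stats entry
 * point exported through dp_ops_cmn.txrx_stats below.  The requested
 * cdp_stats value indexes dp_stats_mapping_table[], which yields a firmware
 * stats id (currently unused, see the TODO above) and a host stats id that
 * is handed to dp_print_host_stats().  A caller would look roughly like:
 *
 *	struct ol_txrx_stats_req req = {0};
 *
 *	dp_txrx_stats(vdev_handle, &req, stats_id);
 *
 * where stats_id is whichever cdp_stats value dp_stats_mapping_table[] maps
 * to the desired host stats type (e.g. TXRX_RX_HOST_STATS).
 */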

static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
	.txrx_peer_teardown = NULL,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
	.flush_cache_rx_queue = NULL,
	/* TODO: get APIs for dscp-tid need to be added */
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_stats = dp_txrx_stats,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	/* TODO: Add other functions */
};

static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
#ifdef MESH_MODE_SUPPORT
	.txrx_set_mesh_mode = dp_peer_set_mesh_mode,
	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_peer_set_nawds = dp_peer_set_nawds,
	/* TODO: Add other functions */
};

static struct cdp_me_ops dp_ops_me = {
	/* TODO */
};

static struct cdp_mon_ops dp_ops_mon = {
	.txrx_monitor_set_filter_ucast_data = NULL,
	.txrx_monitor_set_filter_mcast_data = NULL,
	.txrx_monitor_set_filter_non_data = NULL,
	.txrx_monitor_get_filter_ucast_data = NULL,
	.txrx_monitor_get_filter_mcast_data = NULL,
	.txrx_monitor_get_filter_non_data = NULL,
	.txrx_reset_monitor_mode = NULL,
};

static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_host_stats_get = dp_print_host_stats,
	.txrx_per_peer_stats = dp_get_peer_stats,
	/* TODO */
};

static struct cdp_wds_ops dp_ops_wds = {
	/* TODO */
};

static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};

#ifdef CONFIG_WIN
static struct cdp_pflow_ops dp_ops_pflow = {
	/* TODO */
};
#endif /* CONFIG_WIN */

#ifndef CONFIG_WIN
static struct cdp_misc_ops dp_ops_misc = {
	.get_opmode = dp_get_opmode,
};

static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_ipa_ops dp_ops_ipa = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_lro_ops dp_ops_lro = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/**
 * dp_dummy_bus_suspend() - dummy bus suspend op
 *
 * FIXME - This is a placeholder for the actual logic!
 *
 * Return: QDF_STATUS_SUCCESS
 */
inline QDF_STATUS dp_dummy_bus_suspend(void)
{
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_dummy_bus_resume() - dummy bus resume op
 *
 * FIXME - This is a placeholder for the actual logic!
 *
 * Return: QDF_STATUS_SUCCESS
 */
inline QDF_STATUS dp_dummy_bus_resume(void)
{
	return QDF_STATUS_SUCCESS;
}

static struct cdp_bus_ops dp_ops_bus = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
	.bus_suspend = dp_dummy_bus_suspend,
	.bus_resume = dp_dummy_bus_resume
};

static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_mob_stats_ops dp_ops_mob_stats = {
};

static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_by_addr = dp_find_peer_by_addr,
	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
	.local_peer_id = dp_local_peer_id,
	.peer_find_by_local_id = dp_peer_find_by_local_id,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_vdev_for_peer = dp_get_vdev_for_peer,
	.get_peer_state = dp_get_peer_state,
	.last_assoc_received = dp_get_last_assoc_received,
	.last_disassoc_received = dp_get_last_disassoc_received,
	.last_deauth_received = dp_get_last_deauth_received,
};
#endif

static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef CONFIG_WIN
	.pflow_ops = &dp_ops_pflow,
#endif /* CONFIG_WIN */
#ifndef CONFIG_WIN
	.misc_ops = &dp_ops_misc,
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.ipa_ops = &dp_ops_ipa,
	.lro_ops = &dp_ops_lro,
	.bus_ops = &dp_ops_bus,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.throttle_ops = &dp_ops_throttle,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
};
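
/*
 * Consumption sketch (illustrative): the converged cdp layer reaches this DP
 * implementation only through the ops tables above.  Assuming "soc" is the
 * cdp_soc_t view of the handle returned by dp_soc_attach_wifi3(), a cdp
 * wrapper is expected to dereference the table roughly as:
 *
 *	if (soc->ops->cmn_drv_ops && soc->ops->cmn_drv_ops->txrx_stats)
 *		soc->ops->cmn_drv_ops->txrx_stats(vdev, &req, stats);
 *
 * Entries left NULL (e.g. the monitor filter ops) are features that are not
 * implemented yet for WIFI 3.0 DP, so callers must tolerate missing ops.
 */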

/*
 * dp_soc_attach_wifi3() - Attach txrx SOC
 * @osif_soc: Opaque SOC handle from OSIF/HDD
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
 * @qdf_osdev: QDF device
 * @ol_ops: Offload (OL) interface callbacks
 *
 * Return: DP SOC handle on success, NULL on failure
 */
/*
 * Local prototype added to temporarily address a warning caused by
 * -Wmissing-prototypes. A more correct solution, namely to expose
 * a prototype in an appropriate header file, will come later.
 */
void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops);

void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops)
{
	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));

	if (!soc) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP SOC memory allocation failed"));
		goto fail0;
	}

	soc->cdp_soc.ops = &dp_txrx_ops;
	soc->cdp_soc.ol_ops = ol_ops;
	soc->osif_soc = osif_soc;
	soc->osdev = qdf_osdev;
	soc->hif_handle = hif_handle;
	soc->hal_soc = hif_get_hal_handle(hif_handle);
	soc->htt_handle = htt_soc_attach(soc, osif_soc, htc_handle,
		soc->hal_soc, qdf_osdev);
	if (!soc->htt_handle) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("HTT attach failed"));
		goto fail1;
	}

	soc->wlan_cfg_ctx = wlan_cfg_soc_attach();
	if (!soc->wlan_cfg_ctx) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("wlan_cfg_soc_attach failed"));
		goto fail2;
	}
	qdf_spinlock_create(&soc->peer_ref_mutex);

	if (dp_soc_interrupt_attach(soc) != QDF_STATUS_SUCCESS)
		goto fail2;

	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);

	return (void *)soc;

fail2:
	htt_soc_detach(soc->htt_handle);
fail1:
	qdf_mem_free(soc);
fail0:
	return NULL;
}
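
/*
 * Attach sketch (illustrative): a typical bring-up sequence in the layer
 * above would create the DP SOC once HIF and HTC are ready, for example:
 *
 *	void *dp_soc = dp_soc_attach_wifi3(osif_soc, hif_handle, htc_handle,
 *					   qdf_osdev, ol_ops);
 *	if (!dp_soc)
 *		return QDF_STATUS_E_FAILURE;
 *
 * Teardown is expected to go through dp_soc_detach_wifi3(), registered above
 * as dp_ops_cmn.txrx_soc_detach.
 */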