dp_htt.c

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <htt.h>
  19. #include <hal_hw_headers.h>
  20. #include <hal_api.h>
  21. #include "dp_peer.h"
  22. #include "dp_types.h"
  23. #include "dp_internal.h"
  24. #include "dp_rx.h"
  25. #include "htt_stats.h"
  26. #include "htt_ppdu_stats.h"
  27. #include "dp_htt.h"
  28. #ifdef WIFI_MONITOR_SUPPORT
  29. #include <dp_mon.h>
  30. #endif
  31. #include "qdf_mem.h" /* qdf_mem_malloc,free */
  32. #include "cdp_txrx_cmn_struct.h"
  33. #ifdef FEATURE_PERPKT_INFO
  34. #include "dp_ratetable.h"
  35. #endif
  36. #include <qdf_module.h>
  37. #define HTT_TLV_HDR_LEN HTT_T2H_EXT_STATS_CONF_TLV_HDR_SIZE
  38. #define HTT_HTC_PKT_POOL_INIT_SIZE 64
  39. #define HTT_MSG_BUF_SIZE(msg_bytes) \
  40. ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
  41. #define HTT_PID_BIT_MASK 0x3
  42. #define DP_EXT_MSG_LENGTH 2048
  43. #define HTT_HEADER_LEN 16
  44. #define HTT_MGMT_CTRL_TLV_HDR_RESERVERD_LEN 16
  45. #define HTT_SHIFT_UPPER_TIMESTAMP 32
  46. #define HTT_MASK_UPPER_TIMESTAMP 0xFFFFFFFF00000000
  47. #define HTT_HTC_PKT_STATUS_SUCCESS \
  48. ((pkt->htc_pkt.Status != QDF_STATUS_E_CANCELED) && \
  49. (pkt->htc_pkt.Status != QDF_STATUS_E_RESOURCES))
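/*
 * Note: HTT_HTC_PKT_STATUS_SUCCESS expands to an expression that reads a
 * local variable named 'pkt', so it is only meaningful in scopes where
 * 'pkt' is a struct dp_htt_htc_pkt * (see DP_HTT_SEND_HTC_PKT() below);
 * it treats a cancelled or resource-starved HTC completion as a failure.
 */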
  50. #define HTT_BKP_STATS_MAX_QUEUE_DEPTH 16
  51. /*
  52. * htt_htc_pkt_alloc() - Allocate HTC packet buffer
  53. * @htt_soc: HTT SOC handle
  54. *
  55. * Return: Pointer to htc packet buffer
  56. */
  57. static struct dp_htt_htc_pkt *
  58. htt_htc_pkt_alloc(struct htt_soc *soc)
  59. {
  60. struct dp_htt_htc_pkt_union *pkt = NULL;
  61. HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
  62. if (soc->htt_htc_pkt_freelist) {
  63. pkt = soc->htt_htc_pkt_freelist;
  64. soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
  65. }
  66. HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
  67. if (!pkt)
  68. pkt = qdf_mem_malloc(sizeof(*pkt));
  69. if (!pkt)
  70. return NULL;
  71. htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
  72. return &pkt->u.pkt; /* not actually a dereference */
  73. }
  74. /*
  75. * htt_htc_pkt_free() - Free HTC packet buffer
  76. * @htt_soc: HTT SOC handle
  77. */
  78. static void
  79. htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
  80. {
  81. struct dp_htt_htc_pkt_union *u_pkt =
  82. (struct dp_htt_htc_pkt_union *)pkt;
  83. HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
  84. htc_packet_set_magic_cookie(&(u_pkt->u.pkt.htc_pkt), 0);
  85. u_pkt->u.next = soc->htt_htc_pkt_freelist;
  86. soc->htt_htc_pkt_freelist = u_pkt;
  87. HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
  88. }
  89. /*
  90. * htt_htc_pkt_pool_free() - Free HTC packet pool
  91. * @htt_soc: HTT SOC handle
  92. */
  93. void
  94. htt_htc_pkt_pool_free(struct htt_soc *soc)
  95. {
  96. struct dp_htt_htc_pkt_union *pkt, *next;
  97. pkt = soc->htt_htc_pkt_freelist;
  98. while (pkt) {
  99. next = pkt->u.next;
  100. qdf_mem_free(pkt);
  101. pkt = next;
  102. }
  103. soc->htt_htc_pkt_freelist = NULL;
  104. }
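/*
 * Illustrative sketch (not part of the driver): the intended lifecycle of
 * the freelist-backed HTC packet wrappers above. htt_htc_pkt_alloc() first
 * tries soc->htt_htc_pkt_freelist and falls back to qdf_mem_malloc();
 * htt_htc_pkt_free() does not release memory but pushes the wrapper back
 * onto the freelist; htt_htc_pkt_pool_free() releases the whole pool at
 * detach time. The guard macro below is hypothetical, used only to keep
 * this sketch out of real builds.
 */
#ifdef DP_HTT_DOC_SKETCHES
static void htt_htc_pkt_lifecycle_sketch(struct htt_soc *soc)
{
	struct dp_htt_htc_pkt *pkt = htt_htc_pkt_alloc(soc);

	if (!pkt)
		return;
	/* ... populate pkt->htc_pkt and hand it to HTC ... */
	htt_htc_pkt_free(soc, pkt);	/* back onto soc->htt_htc_pkt_freelist */
}
#endif /* DP_HTT_DOC_SKETCHES */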
  105. #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
  106. static void
  107. htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
  108. {
  109. }
  110. #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
  111. /*
  112. * htt_htc_misc_pkt_list_trim() - trim misc list
  113. * @htt_soc: HTT SOC handle
  114. * @level: max no. of pkts in list
  115. */
  116. static void
  117. htt_htc_misc_pkt_list_trim(struct htt_soc *soc, int level)
  118. {
  119. struct dp_htt_htc_pkt_union *pkt, *next, *prev = NULL;
  120. int i = 0;
  121. qdf_nbuf_t netbuf;
  122. HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
  123. pkt = soc->htt_htc_pkt_misclist;
  124. while (pkt) {
  125. next = pkt->u.next;
  126. /* trim the outgrown list */
  127. if (++i > level) {
  128. netbuf =
  129. (qdf_nbuf_t)(pkt->u.pkt.htc_pkt.pNetBufContext);
  130. qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
  131. qdf_nbuf_free(netbuf);
  132. qdf_mem_free(pkt);
  133. pkt = NULL;
  134. if (prev)
  135. prev->u.next = NULL;
  136. }
  137. prev = pkt;
  138. pkt = next;
  139. }
  140. HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
  141. }
  142. /*
  143. * htt_htc_misc_pkt_list_add() - Add pkt to misc list
  144. * @htt_soc: HTT SOC handle
  145. * @dp_htt_htc_pkt: pkt to be added to list
  146. */
  147. static void
  148. htt_htc_misc_pkt_list_add(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
  149. {
  150. struct dp_htt_htc_pkt_union *u_pkt =
  151. (struct dp_htt_htc_pkt_union *)pkt;
  152. int misclist_trim_level = htc_get_tx_queue_depth(soc->htc_soc,
  153. pkt->htc_pkt.Endpoint)
  154. + DP_HTT_HTC_PKT_MISCLIST_SIZE;
  155. HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
  156. if (soc->htt_htc_pkt_misclist) {
  157. u_pkt->u.next = soc->htt_htc_pkt_misclist;
  158. soc->htt_htc_pkt_misclist = u_pkt;
  159. } else {
  160. soc->htt_htc_pkt_misclist = u_pkt;
  161. }
  162. HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
  163. /* only (CE pipe size + tx queue depth) packets could possibly be in
  164. * use; free older packets in the misclist
  165. */
  166. htt_htc_misc_pkt_list_trim(soc, misclist_trim_level);
  167. }
  168. #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
  169. /**
  170. * DP_HTT_SEND_HTC_PKT() - Send an HTT packet from the host
  171. * @soc: HTT SOC handle
  172. * @pkt: packet to be sent
  173. * @cmd: command to be recorded in the dp htt logger
  174. * @buf: pointer to the buffer to be recorded for the above cmd
  175. *
  176. * Return: QDF_STATUS
  177. */
  178. static inline QDF_STATUS DP_HTT_SEND_HTC_PKT(struct htt_soc *soc,
  179. struct dp_htt_htc_pkt *pkt,
  180. uint8_t cmd, uint8_t *buf)
  181. {
  182. QDF_STATUS status;
  183. htt_command_record(soc->htt_logger_handle, cmd, buf);
  184. status = htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
  185. if (status == QDF_STATUS_SUCCESS && HTT_HTC_PKT_STATUS_SUCCESS)
  186. htt_htc_misc_pkt_list_add(soc, pkt);
  187. else
  188. soc->stats.fail_count++;
  189. return status;
  190. }
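/*
 * Note on the contract of DP_HTT_SEND_HTC_PKT(): when it returns a failure
 * status, ownership of the message stays with the caller, which is expected
 * to free both the nbuf and the packet wrapper (see htt_h2t_ver_req_msg()
 * and htt_srng_setup() below); on success the buffers are reclaimed later
 * through the send-complete and misclist paths.
 */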
  191. /*
  192. * htt_htc_misc_pkt_pool_free() - free pkts in misc list
  193. * @htt_soc: HTT SOC handle
  194. */
  195. static void
  196. htt_htc_misc_pkt_pool_free(struct htt_soc *soc)
  197. {
  198. struct dp_htt_htc_pkt_union *pkt, *next;
  199. qdf_nbuf_t netbuf;
  200. HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
  201. pkt = soc->htt_htc_pkt_misclist;
  202. while (pkt) {
  203. next = pkt->u.next;
  204. if (htc_packet_get_magic_cookie(&(pkt->u.pkt.htc_pkt)) !=
  205. HTC_PACKET_MAGIC_COOKIE) {
  206. pkt = next;
  207. soc->stats.skip_count++;
  208. continue;
  209. }
  210. netbuf = (qdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
  211. qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);
  212. soc->stats.htc_pkt_free++;
  213. dp_htt_info("%pK: Pkt free count %d",
  214. soc->dp_soc, soc->stats.htc_pkt_free);
  215. qdf_nbuf_free(netbuf);
  216. qdf_mem_free(pkt);
  217. pkt = next;
  218. }
  219. soc->htt_htc_pkt_misclist = NULL;
  220. HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
  221. dp_info("HTC Packets, fail count = %d, skip count = %d",
  222. soc->stats.fail_count, soc->stats.skip_count);
  223. }
  224. /*
  225. * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if FW endianness differs from the host's
  226. * @tgt_mac_addr: Target MAC
  227. * @buffer: Output buffer
  228. */
  229. static u_int8_t *
  230. htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
  231. {
  232. #ifdef BIG_ENDIAN_HOST
  233. /*
  234. * The host endianness is opposite of the target endianness.
  235. * To make u_int32_t elements come out correctly, the target->host
  236. * upload has swizzled the bytes in each u_int32_t element of the
  237. * message.
  238. * For byte-array message fields like the MAC address, this
  239. * upload swizzling puts the bytes in the wrong order, and needs
  240. * to be undone.
  241. */
  242. buffer[0] = tgt_mac_addr[3];
  243. buffer[1] = tgt_mac_addr[2];
  244. buffer[2] = tgt_mac_addr[1];
  245. buffer[3] = tgt_mac_addr[0];
  246. buffer[4] = tgt_mac_addr[7];
  247. buffer[5] = tgt_mac_addr[6];
  248. return buffer;
  249. #else
  250. /*
  251. * The host endianness matches the target endianness -
  252. * we can use the mac addr directly from the message buffer.
  253. */
  254. return tgt_mac_addr;
  255. #endif
  256. }
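/*
 * Worked example for the deswizzle above (illustrative values): if the
 * target wrote the MAC 00:11:22:33:44:55, the per-u_int32_t byte swap on a
 * big-endian host presents it as 33:22:11:00:xx:xx:55:44 in the message
 * buffer, so copying bytes {3,2,1,0} and then {7,6} restores the original
 * order; only 6 of the 8 uploaded bytes carry MAC address data.
 */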
  257. /*
  258. * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
  259. * @soc: SOC handle
  260. * @status: Completion status
  261. * @netbuf: HTT buffer
  262. */
  263. static void
  264. dp_htt_h2t_send_complete_free_netbuf(
  265. void *soc, A_STATUS status, qdf_nbuf_t netbuf)
  266. {
  267. qdf_nbuf_free(netbuf);
  268. }
  269. #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
  270. /*
  271. * dp_htt_h2t_send_complete() - H2T completion handler
  272. * @context: Opaque context (HTT SOC handle)
  273. * @htc_pkt: HTC packet
  274. */
  275. static void
  276. dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
  277. {
  278. struct htt_soc *soc = (struct htt_soc *) context;
  279. struct dp_htt_htc_pkt *htt_pkt;
  280. qdf_nbuf_t netbuf;
  281. htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
  282. /* process (free or keep) the netbuf that held the message */
  283. netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
  284. /*
  285. * adf sendcomplete is required for windows only
  286. */
  287. /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
  288. /* free the htt_htc_pkt / HTC_PACKET object */
  289. qdf_nbuf_free(netbuf);
  290. htt_htc_pkt_free(soc, htt_pkt);
  291. }
  292. #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
  293. /*
  294. * * dp_htt_h2t_send_complete() - H2T completion handler
  295. * * @context: Opaque context (HTT SOC handle)
  296. * * @htc_pkt: HTC packet
  297. * */
  298. static void
  299. dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
  300. {
  301. void (*send_complete_part2)(
  302. void *soc, QDF_STATUS status, qdf_nbuf_t msdu);
  303. struct htt_soc *soc = (struct htt_soc *) context;
  304. struct dp_htt_htc_pkt *htt_pkt;
  305. qdf_nbuf_t netbuf;
  306. send_complete_part2 = htc_pkt->pPktContext;
  307. htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
  308. /* process (free or keep) the netbuf that held the message */
  309. netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
  310. /*
  311. * adf sendcomplete is required for windows only
  312. */
  313. /* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
  314. if (send_complete_part2) {
  315. send_complete_part2(
  316. htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
  317. }
  318. /* free the htt_htc_pkt / HTC_PACKET object */
  319. htt_htc_pkt_free(soc, htt_pkt);
  320. }
  321. #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
  322. /*
  323. * htt_h2t_ver_req_msg() - Send HTT version request message to target
  324. * @htt_soc: HTT SOC handle
  325. *
  326. * Return: 0 on success; error code on failure
  327. */
  328. static int htt_h2t_ver_req_msg(struct htt_soc *soc)
  329. {
  330. struct dp_htt_htc_pkt *pkt;
  331. qdf_nbuf_t msg;
  332. uint32_t *msg_word;
  333. QDF_STATUS status;
  334. msg = qdf_nbuf_alloc(
  335. soc->osdev,
  336. HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
  337. /* reserve room for the HTC header */
  338. HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
  339. if (!msg)
  340. return QDF_STATUS_E_NOMEM;
  341. /*
  342. * Set the length of the message.
  343. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
  344. * separately during the below call to qdf_nbuf_push_head.
  345. * The contribution from the HTC header is added separately inside HTC.
  346. */
  347. if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
  348. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  349. "%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
  350. __func__);
  351. return QDF_STATUS_E_FAILURE;
  352. }
  353. /* fill in the message contents */
  354. msg_word = (u_int32_t *) qdf_nbuf_data(msg);
  355. /* rewind beyond alignment pad to get to the HTC header reserved area */
  356. qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
  357. *msg_word = 0;
  358. HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
  359. pkt = htt_htc_pkt_alloc(soc);
  360. if (!pkt) {
  361. qdf_nbuf_free(msg);
  362. return QDF_STATUS_E_FAILURE;
  363. }
  364. pkt->soc_ctxt = NULL; /* not used during send-done callback */
  365. SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
  366. dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
  367. qdf_nbuf_len(msg), soc->htc_endpoint,
  368. HTC_TX_PACKET_TAG_RTPM_PUT_RC);
  369. SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
  370. status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_VERSION_REQ,
  371. NULL);
  372. if (status != QDF_STATUS_SUCCESS) {
  373. qdf_nbuf_free(msg);
  374. htt_htc_pkt_free(soc, pkt);
  375. }
  376. return status;
  377. }
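/*
 * Buffer accounting used by htt_h2t_ver_req_msg() and the other H2T
 * senders in this file (symbolic; no target-specific sizes assumed):
 *
 *   alloc size  = HTT_MSG_BUF_SIZE(payload)
 *               = payload + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING
 *   headroom    = HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING (reserved)
 *   put_tail()  claims 'payload' bytes for the HTT message itself
 *   push_head() exposes HTC_HDR_ALIGNMENT_PADDING so HTC can later prepend
 *               its own header into the remaining reserved headroom
 */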
  378. /*
  379. * htt_srng_setup() - Send SRNG setup message to target
  380. * @htt_soc: HTT SOC handle
  381. * @mac_id: MAC Id
  382. * @hal_ring_hdl: Opaque HAL SRNG pointer
  383. * @hal_ring_type: SRNG ring type
  384. *
  385. * Return: 0 on success; error code on failure
  386. */
  387. int htt_srng_setup(struct htt_soc *soc, int mac_id,
  388. hal_ring_handle_t hal_ring_hdl,
  389. int hal_ring_type)
  390. {
  391. struct dp_htt_htc_pkt *pkt;
  392. qdf_nbuf_t htt_msg;
  393. uint32_t *msg_word;
  394. struct hal_srng_params srng_params;
  395. qdf_dma_addr_t hp_addr, tp_addr;
  396. uint32_t ring_entry_size =
  397. hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
  398. int htt_ring_type, htt_ring_id;
  399. uint8_t *htt_logger_bufp;
  400. int target_pdev_id;
  401. int lmac_id = dp_get_lmac_id_for_pdev_id(soc->dp_soc, 0, mac_id);
  402. QDF_STATUS status;
  403. /* Sizes should be set in 4-byte words */
  404. ring_entry_size = ring_entry_size >> 2;
  405. htt_msg = qdf_nbuf_alloc(soc->osdev,
  406. HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
  407. /* reserve room for the HTC header */
  408. HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
  409. if (!htt_msg)
  410. goto fail0;
  411. hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
  412. hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_ring_hdl);
  413. tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_ring_hdl);
  414. switch (hal_ring_type) {
  415. case RXDMA_BUF:
  416. #ifdef QCA_HOST2FW_RXBUF_RING
  417. if (srng_params.ring_id ==
  418. (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
  419. htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
  420. htt_ring_type = HTT_SW_TO_SW_RING;
  421. #ifdef IPA_OFFLOAD
  422. } else if (srng_params.ring_id ==
  423. (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
  424. htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
  425. htt_ring_type = HTT_SW_TO_SW_RING;
  426. #endif
  427. #else
  428. if (srng_params.ring_id ==
  429. (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
  430. (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
  431. htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
  432. htt_ring_type = HTT_SW_TO_HW_RING;
  433. #endif
  434. } else if (srng_params.ring_id ==
  435. #ifdef IPA_OFFLOAD
  436. (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
  437. #else
  438. (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
  439. #endif
  440. (lmac_id * HAL_MAX_RINGS_PER_LMAC))) {
  441. htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
  442. htt_ring_type = HTT_SW_TO_HW_RING;
  443. } else {
  444. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  445. "%s: Ring %d currently not supported",
  446. __func__, srng_params.ring_id);
  447. goto fail1;
  448. }
  449. dp_info("ring_type %d ring_id %d htt_ring_id %d hp_addr 0x%llx tp_addr 0x%llx",
  450. hal_ring_type, srng_params.ring_id, htt_ring_id,
  451. (uint64_t)hp_addr,
  452. (uint64_t)tp_addr);
  453. break;
  454. case RXDMA_MONITOR_BUF:
  455. htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
  456. htt_ring_type = HTT_SW_TO_HW_RING;
  457. break;
  458. case RXDMA_MONITOR_STATUS:
  459. htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
  460. htt_ring_type = HTT_SW_TO_HW_RING;
  461. break;
  462. case RXDMA_MONITOR_DST:
  463. htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
  464. htt_ring_type = HTT_HW_TO_SW_RING;
  465. break;
  466. case RXDMA_MONITOR_DESC:
  467. htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
  468. htt_ring_type = HTT_SW_TO_HW_RING;
  469. break;
  470. case RXDMA_DST:
  471. htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
  472. htt_ring_type = HTT_HW_TO_SW_RING;
  473. break;
  474. default:
  475. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  476. "%s: Ring currently not supported", __func__);
  477. goto fail1;
  478. }
  479. /*
  480. * Set the length of the message.
  481. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
  482. * separately during the below call to qdf_nbuf_push_head.
  483. * The contribution from the HTC header is added separately inside HTC.
  484. */
  485. if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
  486. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  487. "%s: Failed to expand head for SRING_SETUP msg",
  488. __func__);
  489. return QDF_STATUS_E_FAILURE;
  490. }
  491. msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
  492. /* rewind beyond alignment pad to get to the HTC header reserved area */
  493. qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
  494. /* word 0 */
  495. *msg_word = 0;
  496. htt_logger_bufp = (uint8_t *)msg_word;
  497. HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
  498. target_pdev_id =
  499. dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, mac_id);
  500. if ((htt_ring_type == HTT_SW_TO_HW_RING) ||
  501. (htt_ring_type == HTT_HW_TO_SW_RING))
  502. HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, target_pdev_id);
  503. else
  504. HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);
  505. dp_info("mac_id %d", mac_id);
  506. HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
  507. /* TODO: Discuss with FW on changing this to unique ID and using
  508. * htt_ring_type to send the type of ring
  509. */
  510. HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
  511. /* word 1 */
  512. msg_word++;
  513. *msg_word = 0;
  514. HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
  515. srng_params.ring_base_paddr & 0xffffffff);
  516. /* word 2 */
  517. msg_word++;
  518. *msg_word = 0;
  519. HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
  520. (uint64_t)srng_params.ring_base_paddr >> 32);
  521. /* word 3 */
  522. msg_word++;
  523. *msg_word = 0;
  524. HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
  525. HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
  526. (ring_entry_size * srng_params.num_entries));
  527. dp_info("entry_size %d", ring_entry_size);
  528. dp_info("num_entries %d", srng_params.num_entries);
  529. dp_info("ring_size %d", (ring_entry_size * srng_params.num_entries));
  530. if (htt_ring_type == HTT_SW_TO_HW_RING)
  531. HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
  532. *msg_word, 1);
  533. HTT_SRING_SETUP_RING_MISC_CFG_FLAG_MSI_SWAP_SET(*msg_word,
  534. !!(srng_params.flags & HAL_SRNG_MSI_SWAP));
  535. HTT_SRING_SETUP_RING_MISC_CFG_FLAG_TLV_SWAP_SET(*msg_word,
  536. !!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
  537. HTT_SRING_SETUP_RING_MISC_CFG_FLAG_HOST_FW_SWAP_SET(*msg_word,
  538. !!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
  539. /* word 4 */
  540. msg_word++;
  541. *msg_word = 0;
  542. HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
  543. hp_addr & 0xffffffff);
  544. /* word 5 */
  545. msg_word++;
  546. *msg_word = 0;
  547. HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
  548. (uint64_t)hp_addr >> 32);
  549. /* word 6 */
  550. msg_word++;
  551. *msg_word = 0;
  552. HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
  553. tp_addr & 0xffffffff);
  554. /* word 7 */
  555. msg_word++;
  556. *msg_word = 0;
  557. HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
  558. (uint64_t)tp_addr >> 32);
  559. /* word 8 */
  560. msg_word++;
  561. *msg_word = 0;
  562. HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
  563. srng_params.msi_addr & 0xffffffff);
  564. /* word 9 */
  565. msg_word++;
  566. *msg_word = 0;
  567. HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
  568. (uint64_t)(srng_params.msi_addr) >> 32);
  569. /* word 10 */
  570. msg_word++;
  571. *msg_word = 0;
  572. HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
  573. qdf_cpu_to_le32(srng_params.msi_data));
  574. /* word 11 */
  575. msg_word++;
  576. *msg_word = 0;
  577. HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
  578. srng_params.intr_batch_cntr_thres_entries *
  579. ring_entry_size);
  580. HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
  581. srng_params.intr_timer_thres_us >> 3);
  582. /* word 12 */
  583. msg_word++;
  584. *msg_word = 0;
  585. if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
  586. /* TODO: Setting low threshold to 1/8th of ring size - see
  587. * if this needs to be configurable
  588. */
  589. HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
  590. srng_params.low_threshold);
  591. }
  592. /* "response_required" field should be set if a HTT response message is
  593. * required after setting up the ring.
  594. */
  595. pkt = htt_htc_pkt_alloc(soc);
  596. if (!pkt)
  597. goto fail1;
  598. pkt->soc_ctxt = NULL; /* not used during send-done callback */
  599. SET_HTC_PACKET_INFO_TX(
  600. &pkt->htc_pkt,
  601. dp_htt_h2t_send_complete_free_netbuf,
  602. qdf_nbuf_data(htt_msg),
  603. qdf_nbuf_len(htt_msg),
  604. soc->htc_endpoint,
  605. HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
  606. SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
  607. status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_SRING_SETUP,
  608. htt_logger_bufp);
  609. if (status != QDF_STATUS_SUCCESS) {
  610. qdf_nbuf_free(htt_msg);
  611. htt_htc_pkt_free(soc, pkt);
  612. }
  613. return status;
  614. fail1:
  615. qdf_nbuf_free(htt_msg);
  616. fail0:
  617. return QDF_STATUS_E_FAILURE;
  618. }
  619. qdf_export_symbol(htt_srng_setup);
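/*
 * Illustrative call site (hypothetical names, guarded out of real builds):
 * the dp layer resolves the opaque hal_ring_handle_t for a ring it owns and
 * asks the target to mirror that ring's parameters via htt_srng_setup().
 */
#ifdef DP_HTT_DOC_SKETCHES
static int dp_htt_srng_setup_sketch(struct htt_soc *htt_soc, int mac_id,
				    hal_ring_handle_t ring_hdl)
{
	/* e.g. a monitor status ring owned by this mac */
	return htt_srng_setup(htt_soc, mac_id, ring_hdl, RXDMA_MONITOR_STATUS);
}
#endif /* DP_HTT_DOC_SKETCHES */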
  620. #ifdef QCA_SUPPORT_FULL_MON
  621. /**
  622. * htt_h2t_full_mon_cfg() - Send full monitor configuration msg to FW
  623. *
  624. * @htt_soc: HTT SOC handle
  625. * @pdev_id: Radio id
  626. * @config: enable/disable configuration
  627. *
  628. * Return: Success when HTT message is sent, error on failure
  629. */
  630. int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
  631. uint8_t pdev_id,
  632. enum dp_full_mon_config config)
  633. {
  634. struct htt_soc *soc = (struct htt_soc *)htt_soc;
  635. struct dp_htt_htc_pkt *pkt;
  636. qdf_nbuf_t htt_msg;
  637. uint32_t *msg_word;
  638. uint8_t *htt_logger_bufp;
  639. htt_msg = qdf_nbuf_alloc(soc->osdev,
  640. HTT_MSG_BUF_SIZE(
  641. HTT_RX_FULL_MONITOR_MODE_SETUP_SZ),
  642. /* reserve room for the HTC header */
  643. HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
  644. 4,
  645. TRUE);
  646. if (!htt_msg)
  647. return QDF_STATUS_E_FAILURE;
  648. /*
  649. * Set the length of the message.
  650. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
  651. * separately during the below call to qdf_nbuf_push_head.
  652. * The contribution from the HTC header is added separately inside HTC.
  653. */
  654. if (!qdf_nbuf_put_tail(htt_msg, HTT_RX_FULL_MONITOR_MODE_SETUP_SZ)) {
  655. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  656. "%s: Failed to expand head for RX Ring Cfg msg",
  657. __func__);
  658. goto fail1;
  659. }
  660. msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
  661. /* rewind beyond alignment pad to get to the HTC header reserved area */
  662. qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
  663. /* word 0 */
  664. *msg_word = 0;
  665. htt_logger_bufp = (uint8_t *)msg_word;
  666. HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
  667. HTT_RX_FULL_MONITOR_MODE_OPERATION_PDEV_ID_SET(
  668. *msg_word, DP_SW2HW_MACID(pdev_id));
  669. msg_word++;
  670. *msg_word = 0;
  671. /* word 1 */
  672. if (config == DP_FULL_MON_ENABLE) {
  673. HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
  674. HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, true);
  675. HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, true);
  676. HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
  677. } else if (config == DP_FULL_MON_DISABLE) {
  678. /* As per the MAC team's suggestion, while disabling full monitor
  679. * mode, set the 'en' bit to true in the full monitor mode register.
  680. */
  681. HTT_RX_FULL_MONITOR_MODE_ENABLE_SET(*msg_word, true);
  682. HTT_RX_FULL_MONITOR_MODE_ZERO_MPDU_SET(*msg_word, false);
  683. HTT_RX_FULL_MONITOR_MODE_NON_ZERO_MPDU_SET(*msg_word, false);
  684. HTT_RX_FULL_MONITOR_MODE_RELEASE_RINGS_SET(*msg_word, 0x2);
  685. }
  686. pkt = htt_htc_pkt_alloc(soc);
  687. if (!pkt) {
  688. qdf_err("HTC packet allocation failed");
  689. goto fail1;
  690. }
  691. pkt->soc_ctxt = NULL; /* not used during send-done callback */
  692. SET_HTC_PACKET_INFO_TX(
  693. &pkt->htc_pkt,
  694. dp_htt_h2t_send_complete_free_netbuf,
  695. qdf_nbuf_data(htt_msg),
  696. qdf_nbuf_len(htt_msg),
  697. soc->htc_endpoint,
  698. HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
  699. SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
  700. qdf_debug("config: %d", config);
  701. DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE,
  702. htt_logger_bufp);
  703. return QDF_STATUS_SUCCESS;
  704. fail1:
  705. qdf_nbuf_free(htt_msg);
  706. return QDF_STATUS_E_FAILURE;
  707. }
  708. qdf_export_symbol(htt_h2t_full_mon_cfg);
  709. #else
  710. int htt_h2t_full_mon_cfg(struct htt_soc *htt_soc,
  711. uint8_t pdev_id,
  712. enum dp_full_mon_config config)
  713. {
  714. return 0;
  715. }
  716. qdf_export_symbol(htt_h2t_full_mon_cfg);
  717. #endif
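/*
 * Illustrative usage (hypothetical caller, guarded out of real builds):
 * full monitor mode is toggled per radio by sending the message built in
 * htt_h2t_full_mon_cfg() above.
 */
#ifdef DP_HTT_DOC_SKETCHES
static inline int dp_full_mon_enable_sketch(struct htt_soc *htt_soc,
					    uint8_t pdev_id)
{
	return htt_h2t_full_mon_cfg(htt_soc, pdev_id, DP_FULL_MON_ENABLE);
}
#endif /* DP_HTT_DOC_SKETCHES */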
  718. /*
  719. * htt_h2t_rx_ring_cfg() - Send SRNG packet and TLV filter
  720. * config message to target
  721. * @htt_soc: HTT SOC handle
  722. * @pdev_id: WIN- PDEV Id, MCL- mac id
  723. * @hal_ring_hdl: Opaque HAL SRNG pointer
  724. * @hal_ring_type: SRNG ring type
  725. * @ring_buf_size: SRNG buffer size
  726. * @htt_tlv_filter: Rx SRNG TLV and filter setting
  727. * Return: 0 on success; error code on failure
  728. */
  729. int htt_h2t_rx_ring_cfg(struct htt_soc *htt_soc, int pdev_id,
  730. hal_ring_handle_t hal_ring_hdl,
  731. int hal_ring_type, int ring_buf_size,
  732. struct htt_rx_ring_tlv_filter *htt_tlv_filter)
  733. {
  734. struct htt_soc *soc = (struct htt_soc *)htt_soc;
  735. struct dp_htt_htc_pkt *pkt;
  736. qdf_nbuf_t htt_msg;
  737. uint32_t *msg_word;
  738. struct hal_srng_params srng_params;
  739. uint32_t htt_ring_type, htt_ring_id;
  740. uint32_t tlv_filter;
  741. uint8_t *htt_logger_bufp;
  742. struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->dp_soc->wlan_cfg_ctx;
  743. uint32_t mon_drop_th = wlan_cfg_get_mon_drop_thresh(wlan_cfg_ctx);
  744. int target_pdev_id;
  745. QDF_STATUS status;
  746. htt_msg = qdf_nbuf_alloc(soc->osdev,
  747. HTT_MSG_BUF_SIZE(HTT_RX_RING_SELECTION_CFG_SZ),
  748. /* reserve room for the HTC header */
  749. HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
  750. if (!htt_msg)
  751. goto fail0;
  752. hal_get_srng_params(soc->hal_soc, hal_ring_hdl, &srng_params);
  753. switch (hal_ring_type) {
  754. case RXDMA_BUF:
  755. htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
  756. htt_ring_type = HTT_SW_TO_HW_RING;
  757. break;
  758. case RXDMA_MONITOR_BUF:
  759. htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
  760. htt_ring_type = HTT_SW_TO_HW_RING;
  761. break;
  762. case RXDMA_MONITOR_STATUS:
  763. htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
  764. htt_ring_type = HTT_SW_TO_HW_RING;
  765. break;
  766. case RXDMA_MONITOR_DST:
  767. htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
  768. htt_ring_type = HTT_HW_TO_SW_RING;
  769. break;
  770. case RXDMA_MONITOR_DESC:
  771. htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
  772. htt_ring_type = HTT_SW_TO_HW_RING;
  773. break;
  774. case RXDMA_DST:
  775. htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
  776. htt_ring_type = HTT_HW_TO_SW_RING;
  777. break;
  778. default:
  779. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  780. "%s: Ring currently not supported", __func__);
  781. goto fail1;
  782. }
  783. /*
  784. * Set the length of the message.
  785. * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
  786. * separately during the below call to qdf_nbuf_push_head.
  787. * The contribution from the HTC header is added separately inside HTC.
  788. */
  789. if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
  790. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  791. "%s: Failed to expand head for RX Ring Cfg msg",
  792. __func__);
  793. goto fail1; /* failure */
  794. }
  795. msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
  796. /* rewind beyond alignment pad to get to the HTC header reserved area */
  797. qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
  798. /* word 0 */
  799. htt_logger_bufp = (uint8_t *)msg_word;
  800. *msg_word = 0;
  801. HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
  802. /*
  803. * pdev_id is indexed from 0, whereas mac_id is indexed from 1;
  804. * SW_TO_SW and SW_TO_HW rings are unaffected by this
  805. */
  806. target_pdev_id =
  807. dp_get_target_pdev_id_for_host_pdev_id(soc->dp_soc, pdev_id);
  808. if (htt_ring_type == HTT_SW_TO_SW_RING ||
  809. htt_ring_type == HTT_SW_TO_HW_RING)
  810. HTT_RX_RING_SELECTION_CFG_PDEV_ID_SET(*msg_word,
  811. target_pdev_id);
  812. /* TODO: Discuss with FW on changing this to unique ID and using
  813. * htt_ring_type to send the type of ring
  814. */
  815. HTT_RX_RING_SELECTION_CFG_RING_ID_SET(*msg_word, htt_ring_id);
  816. HTT_RX_RING_SELECTION_CFG_STATUS_TLV_SET(*msg_word,
  817. !!(srng_params.flags & HAL_SRNG_MSI_SWAP));
  818. HTT_RX_RING_SELECTION_CFG_RX_OFFSETS_VALID_SET(*msg_word,
  819. htt_tlv_filter->offset_valid);
  820. if (mon_drop_th > 0)
  821. HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
  822. 1);
  823. else
  824. HTT_RX_RING_SELECTION_CFG_DROP_THRESHOLD_VALID_SET(*msg_word,
  825. 0);
  826. /* word 1 */
  827. msg_word++;
  828. *msg_word = 0;
  829. HTT_RX_RING_SELECTION_CFG_RING_BUFFER_SIZE_SET(*msg_word,
  830. ring_buf_size);
  831. /* word 2 */
  832. msg_word++;
  833. *msg_word = 0;
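/*
 * The blocks below program one enable bit per 802.11 frame type/subtype and
 * per filter category: FP (filter pass), MD (monitor direct) and MO
 * (monitor other). Each htt_rx_ring_pkt_enable_subtype_set(word, FLAGn,
 * category, type, subtype, enable) call selects the bit position from the
 * FLAGn/category/type/subtype tokens and sets it in *msg_word, driven by
 * the corresponding *_filter bitmap in htt_tlv_filter.
 */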
  834. if (htt_tlv_filter->enable_fp) {
  835. /* TYPE: MGMT */
  836. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  837. FP, MGMT, 0000,
  838. (htt_tlv_filter->fp_mgmt_filter &
  839. FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
  840. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  841. FP, MGMT, 0001,
  842. (htt_tlv_filter->fp_mgmt_filter &
  843. FILTER_MGMT_ASSOC_RES) ? 1 : 0);
  844. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  845. FP, MGMT, 0010,
  846. (htt_tlv_filter->fp_mgmt_filter &
  847. FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
  848. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  849. FP, MGMT, 0011,
  850. (htt_tlv_filter->fp_mgmt_filter &
  851. FILTER_MGMT_REASSOC_RES) ? 1 : 0);
  852. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  853. FP, MGMT, 0100,
  854. (htt_tlv_filter->fp_mgmt_filter &
  855. FILTER_MGMT_PROBE_REQ) ? 1 : 0);
  856. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  857. FP, MGMT, 0101,
  858. (htt_tlv_filter->fp_mgmt_filter &
  859. FILTER_MGMT_PROBE_RES) ? 1 : 0);
  860. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  861. FP, MGMT, 0110,
  862. (htt_tlv_filter->fp_mgmt_filter &
  863. FILTER_MGMT_TIM_ADVT) ? 1 : 0);
  864. /* reserved */
  865. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, FP,
  866. MGMT, 0111,
  867. (htt_tlv_filter->fp_mgmt_filter &
  868. FILTER_MGMT_RESERVED_7) ? 1 : 0);
  869. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  870. FP, MGMT, 1000,
  871. (htt_tlv_filter->fp_mgmt_filter &
  872. FILTER_MGMT_BEACON) ? 1 : 0);
  873. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  874. FP, MGMT, 1001,
  875. (htt_tlv_filter->fp_mgmt_filter &
  876. FILTER_MGMT_ATIM) ? 1 : 0);
  877. }
  878. if (htt_tlv_filter->enable_md) {
  879. /* TYPE: MGMT */
  880. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  881. MD, MGMT, 0000,
  882. (htt_tlv_filter->md_mgmt_filter &
  883. FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
  884. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  885. MD, MGMT, 0001,
  886. (htt_tlv_filter->md_mgmt_filter &
  887. FILTER_MGMT_ASSOC_RES) ? 1 : 0);
  888. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  889. MD, MGMT, 0010,
  890. (htt_tlv_filter->md_mgmt_filter &
  891. FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
  892. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  893. MD, MGMT, 0011,
  894. (htt_tlv_filter->md_mgmt_filter &
  895. FILTER_MGMT_REASSOC_RES) ? 1 : 0);
  896. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  897. MD, MGMT, 0100,
  898. (htt_tlv_filter->md_mgmt_filter &
  899. FILTER_MGMT_PROBE_REQ) ? 1 : 0);
  900. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  901. MD, MGMT, 0101,
  902. (htt_tlv_filter->md_mgmt_filter &
  903. FILTER_MGMT_PROBE_RES) ? 1 : 0);
  904. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  905. MD, MGMT, 0110,
  906. (htt_tlv_filter->md_mgmt_filter &
  907. FILTER_MGMT_TIM_ADVT) ? 1 : 0);
  908. /* reserved */
  909. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MD,
  910. MGMT, 0111,
  911. (htt_tlv_filter->md_mgmt_filter &
  912. FILTER_MGMT_RESERVED_7) ? 1 : 0);
  913. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  914. MD, MGMT, 1000,
  915. (htt_tlv_filter->md_mgmt_filter &
  916. FILTER_MGMT_BEACON) ? 1 : 0);
  917. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  918. MD, MGMT, 1001,
  919. (htt_tlv_filter->md_mgmt_filter &
  920. FILTER_MGMT_ATIM) ? 1 : 0);
  921. }
  922. if (htt_tlv_filter->enable_mo) {
  923. /* TYPE: MGMT */
  924. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  925. MO, MGMT, 0000,
  926. (htt_tlv_filter->mo_mgmt_filter &
  927. FILTER_MGMT_ASSOC_REQ) ? 1 : 0);
  928. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  929. MO, MGMT, 0001,
  930. (htt_tlv_filter->mo_mgmt_filter &
  931. FILTER_MGMT_ASSOC_RES) ? 1 : 0);
  932. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  933. MO, MGMT, 0010,
  934. (htt_tlv_filter->mo_mgmt_filter &
  935. FILTER_MGMT_REASSOC_REQ) ? 1 : 0);
  936. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  937. MO, MGMT, 0011,
  938. (htt_tlv_filter->mo_mgmt_filter &
  939. FILTER_MGMT_REASSOC_RES) ? 1 : 0);
  940. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  941. MO, MGMT, 0100,
  942. (htt_tlv_filter->mo_mgmt_filter &
  943. FILTER_MGMT_PROBE_REQ) ? 1 : 0);
  944. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  945. MO, MGMT, 0101,
  946. (htt_tlv_filter->mo_mgmt_filter &
  947. FILTER_MGMT_PROBE_RES) ? 1 : 0);
  948. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  949. MO, MGMT, 0110,
  950. (htt_tlv_filter->mo_mgmt_filter &
  951. FILTER_MGMT_TIM_ADVT) ? 1 : 0);
  952. /* reserved */
  953. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0, MO,
  954. MGMT, 0111,
  955. (htt_tlv_filter->mo_mgmt_filter &
  956. FILTER_MGMT_RESERVED_7) ? 1 : 0);
  957. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  958. MO, MGMT, 1000,
  959. (htt_tlv_filter->mo_mgmt_filter &
  960. FILTER_MGMT_BEACON) ? 1 : 0);
  961. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG0,
  962. MO, MGMT, 1001,
  963. (htt_tlv_filter->mo_mgmt_filter &
  964. FILTER_MGMT_ATIM) ? 1 : 0);
  965. }
  966. /* word 3 */
  967. msg_word++;
  968. *msg_word = 0;
  969. if (htt_tlv_filter->enable_fp) {
  970. /* TYPE: MGMT */
  971. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  972. FP, MGMT, 1010,
  973. (htt_tlv_filter->fp_mgmt_filter &
  974. FILTER_MGMT_DISASSOC) ? 1 : 0);
  975. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  976. FP, MGMT, 1011,
  977. (htt_tlv_filter->fp_mgmt_filter &
  978. FILTER_MGMT_AUTH) ? 1 : 0);
  979. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  980. FP, MGMT, 1100,
  981. (htt_tlv_filter->fp_mgmt_filter &
  982. FILTER_MGMT_DEAUTH) ? 1 : 0);
  983. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  984. FP, MGMT, 1101,
  985. (htt_tlv_filter->fp_mgmt_filter &
  986. FILTER_MGMT_ACTION) ? 1 : 0);
  987. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  988. FP, MGMT, 1110,
  989. (htt_tlv_filter->fp_mgmt_filter &
  990. FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
  991. /* reserved*/
  992. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, FP,
  993. MGMT, 1111,
  994. (htt_tlv_filter->fp_mgmt_filter &
  995. FILTER_MGMT_RESERVED_15) ? 1 : 0);
  996. }
  997. if (htt_tlv_filter->enable_md) {
  998. /* TYPE: MGMT */
  999. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1000. MD, MGMT, 1010,
  1001. (htt_tlv_filter->md_mgmt_filter &
  1002. FILTER_MGMT_DISASSOC) ? 1 : 0);
  1003. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1004. MD, MGMT, 1011,
  1005. (htt_tlv_filter->md_mgmt_filter &
  1006. FILTER_MGMT_AUTH) ? 1 : 0);
  1007. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1008. MD, MGMT, 1100,
  1009. (htt_tlv_filter->md_mgmt_filter &
  1010. FILTER_MGMT_DEAUTH) ? 1 : 0);
  1011. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1012. MD, MGMT, 1101,
  1013. (htt_tlv_filter->md_mgmt_filter &
  1014. FILTER_MGMT_ACTION) ? 1 : 0);
  1015. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1016. MD, MGMT, 1110,
  1017. (htt_tlv_filter->md_mgmt_filter &
  1018. FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
  1019. }
  1020. if (htt_tlv_filter->enable_mo) {
  1021. /* TYPE: MGMT */
  1022. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1023. MO, MGMT, 1010,
  1024. (htt_tlv_filter->mo_mgmt_filter &
  1025. FILTER_MGMT_DISASSOC) ? 1 : 0);
  1026. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1027. MO, MGMT, 1011,
  1028. (htt_tlv_filter->mo_mgmt_filter &
  1029. FILTER_MGMT_AUTH) ? 1 : 0);
  1030. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1031. MO, MGMT, 1100,
  1032. (htt_tlv_filter->mo_mgmt_filter &
  1033. FILTER_MGMT_DEAUTH) ? 1 : 0);
  1034. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1035. MO, MGMT, 1101,
  1036. (htt_tlv_filter->mo_mgmt_filter &
  1037. FILTER_MGMT_ACTION) ? 1 : 0);
  1038. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1,
  1039. MO, MGMT, 1110,
  1040. (htt_tlv_filter->mo_mgmt_filter &
  1041. FILTER_MGMT_ACT_NO_ACK) ? 1 : 0);
  1042. /* reserved*/
  1043. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG1, MO,
  1044. MGMT, 1111,
  1045. (htt_tlv_filter->mo_mgmt_filter &
  1046. FILTER_MGMT_RESERVED_15) ? 1 : 0);
  1047. }
  1048. /* word 4 */
  1049. msg_word++;
  1050. *msg_word = 0;
  1051. if (htt_tlv_filter->enable_fp) {
  1052. /* TYPE: CTRL */
  1053. /* reserved */
  1054. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1055. CTRL, 0000,
  1056. (htt_tlv_filter->fp_ctrl_filter &
  1057. FILTER_CTRL_RESERVED_1) ? 1 : 0);
  1058. /* reserved */
  1059. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1060. CTRL, 0001,
  1061. (htt_tlv_filter->fp_ctrl_filter &
  1062. FILTER_CTRL_RESERVED_2) ? 1 : 0);
  1063. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1064. CTRL, 0010,
  1065. (htt_tlv_filter->fp_ctrl_filter &
  1066. FILTER_CTRL_TRIGGER) ? 1 : 0);
  1067. /* reserved */
  1068. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1069. CTRL, 0011,
  1070. (htt_tlv_filter->fp_ctrl_filter &
  1071. FILTER_CTRL_RESERVED_4) ? 1 : 0);
  1072. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1073. CTRL, 0100,
  1074. (htt_tlv_filter->fp_ctrl_filter &
  1075. FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
  1076. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1077. CTRL, 0101,
  1078. (htt_tlv_filter->fp_ctrl_filter &
  1079. FILTER_CTRL_VHT_NDP) ? 1 : 0);
  1080. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1081. CTRL, 0110,
  1082. (htt_tlv_filter->fp_ctrl_filter &
  1083. FILTER_CTRL_FRAME_EXT) ? 1 : 0);
  1084. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1085. CTRL, 0111,
  1086. (htt_tlv_filter->fp_ctrl_filter &
  1087. FILTER_CTRL_CTRLWRAP) ? 1 : 0);
  1088. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1089. CTRL, 1000,
  1090. (htt_tlv_filter->fp_ctrl_filter &
  1091. FILTER_CTRL_BA_REQ) ? 1 : 0);
  1092. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, FP,
  1093. CTRL, 1001,
  1094. (htt_tlv_filter->fp_ctrl_filter &
  1095. FILTER_CTRL_BA) ? 1 : 0);
  1096. }
  1097. if (htt_tlv_filter->enable_md) {
  1098. /* TYPE: CTRL */
  1099. /* reserved */
  1100. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1101. CTRL, 0000,
  1102. (htt_tlv_filter->md_ctrl_filter &
  1103. FILTER_CTRL_RESERVED_1) ? 1 : 0);
  1104. /* reserved */
  1105. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1106. CTRL, 0001,
  1107. (htt_tlv_filter->md_ctrl_filter &
  1108. FILTER_CTRL_RESERVED_2) ? 1 : 0);
  1109. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1110. CTRL, 0010,
  1111. (htt_tlv_filter->md_ctrl_filter &
  1112. FILTER_CTRL_TRIGGER) ? 1 : 0);
  1113. /* reserved */
  1114. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1115. CTRL, 0011,
  1116. (htt_tlv_filter->md_ctrl_filter &
  1117. FILTER_CTRL_RESERVED_4) ? 1 : 0);
  1118. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1119. CTRL, 0100,
  1120. (htt_tlv_filter->md_ctrl_filter &
  1121. FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
  1122. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1123. CTRL, 0101,
  1124. (htt_tlv_filter->md_ctrl_filter &
  1125. FILTER_CTRL_VHT_NDP) ? 1 : 0);
  1126. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1127. CTRL, 0110,
  1128. (htt_tlv_filter->md_ctrl_filter &
  1129. FILTER_CTRL_FRAME_EXT) ? 1 : 0);
  1130. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1131. CTRL, 0111,
  1132. (htt_tlv_filter->md_ctrl_filter &
  1133. FILTER_CTRL_CTRLWRAP) ? 1 : 0);
  1134. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1135. CTRL, 1000,
  1136. (htt_tlv_filter->md_ctrl_filter &
  1137. FILTER_CTRL_BA_REQ) ? 1 : 0);
  1138. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MD,
  1139. CTRL, 1001,
  1140. (htt_tlv_filter->md_ctrl_filter &
  1141. FILTER_CTRL_BA) ? 1 : 0);
  1142. }
  1143. if (htt_tlv_filter->enable_mo) {
  1144. /* TYPE: CTRL */
  1145. /* reserved */
  1146. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1147. CTRL, 0000,
  1148. (htt_tlv_filter->mo_ctrl_filter &
  1149. FILTER_CTRL_RESERVED_1) ? 1 : 0);
  1150. /* reserved */
  1151. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1152. CTRL, 0001,
  1153. (htt_tlv_filter->mo_ctrl_filter &
  1154. FILTER_CTRL_RESERVED_2) ? 1 : 0);
  1155. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1156. CTRL, 0010,
  1157. (htt_tlv_filter->mo_ctrl_filter &
  1158. FILTER_CTRL_TRIGGER) ? 1 : 0);
  1159. /* reserved */
  1160. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1161. CTRL, 0011,
  1162. (htt_tlv_filter->mo_ctrl_filter &
  1163. FILTER_CTRL_RESERVED_4) ? 1 : 0);
  1164. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1165. CTRL, 0100,
  1166. (htt_tlv_filter->mo_ctrl_filter &
  1167. FILTER_CTRL_BF_REP_POLL) ? 1 : 0);
  1168. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1169. CTRL, 0101,
  1170. (htt_tlv_filter->mo_ctrl_filter &
  1171. FILTER_CTRL_VHT_NDP) ? 1 : 0);
  1172. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1173. CTRL, 0110,
  1174. (htt_tlv_filter->mo_ctrl_filter &
  1175. FILTER_CTRL_FRAME_EXT) ? 1 : 0);
  1176. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1177. CTRL, 0111,
  1178. (htt_tlv_filter->mo_ctrl_filter &
  1179. FILTER_CTRL_CTRLWRAP) ? 1 : 0);
  1180. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1181. CTRL, 1000,
  1182. (htt_tlv_filter->mo_ctrl_filter &
  1183. FILTER_CTRL_BA_REQ) ? 1 : 0);
  1184. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG2, MO,
  1185. CTRL, 1001,
  1186. (htt_tlv_filter->mo_ctrl_filter &
  1187. FILTER_CTRL_BA) ? 1 : 0);
  1188. }
  1189. /* word 5 */
  1190. msg_word++;
  1191. *msg_word = 0;
  1192. if (htt_tlv_filter->enable_fp) {
  1193. /* TYPE: CTRL */
  1194. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1195. CTRL, 1010,
  1196. (htt_tlv_filter->fp_ctrl_filter &
  1197. FILTER_CTRL_PSPOLL) ? 1 : 0);
  1198. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1199. CTRL, 1011,
  1200. (htt_tlv_filter->fp_ctrl_filter &
  1201. FILTER_CTRL_RTS) ? 1 : 0);
  1202. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1203. CTRL, 1100,
  1204. (htt_tlv_filter->fp_ctrl_filter &
  1205. FILTER_CTRL_CTS) ? 1 : 0);
  1206. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1207. CTRL, 1101,
  1208. (htt_tlv_filter->fp_ctrl_filter &
  1209. FILTER_CTRL_ACK) ? 1 : 0);
  1210. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1211. CTRL, 1110,
  1212. (htt_tlv_filter->fp_ctrl_filter &
  1213. FILTER_CTRL_CFEND) ? 1 : 0);
  1214. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1215. CTRL, 1111,
  1216. (htt_tlv_filter->fp_ctrl_filter &
  1217. FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
  1218. /* TYPE: DATA */
  1219. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1220. DATA, MCAST,
  1221. (htt_tlv_filter->fp_data_filter &
  1222. FILTER_DATA_MCAST) ? 1 : 0);
  1223. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1224. DATA, UCAST,
  1225. (htt_tlv_filter->fp_data_filter &
  1226. FILTER_DATA_UCAST) ? 1 : 0);
  1227. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, FP,
  1228. DATA, NULL,
  1229. (htt_tlv_filter->fp_data_filter &
  1230. FILTER_DATA_NULL) ? 1 : 0);
  1231. }
  1232. if (htt_tlv_filter->enable_md) {
  1233. /* TYPE: CTRL */
  1234. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1235. CTRL, 1010,
  1236. (htt_tlv_filter->md_ctrl_filter &
  1237. FILTER_CTRL_PSPOLL) ? 1 : 0);
  1238. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1239. CTRL, 1011,
  1240. (htt_tlv_filter->md_ctrl_filter &
  1241. FILTER_CTRL_RTS) ? 1 : 0);
  1242. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1243. CTRL, 1100,
  1244. (htt_tlv_filter->md_ctrl_filter &
  1245. FILTER_CTRL_CTS) ? 1 : 0);
  1246. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1247. CTRL, 1101,
  1248. (htt_tlv_filter->md_ctrl_filter &
  1249. FILTER_CTRL_ACK) ? 1 : 0);
  1250. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1251. CTRL, 1110,
  1252. (htt_tlv_filter->md_ctrl_filter &
  1253. FILTER_CTRL_CFEND) ? 1 : 0);
  1254. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1255. CTRL, 1111,
  1256. (htt_tlv_filter->md_ctrl_filter &
  1257. FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
  1258. /* TYPE: DATA */
  1259. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1260. DATA, MCAST,
  1261. (htt_tlv_filter->md_data_filter &
  1262. FILTER_DATA_MCAST) ? 1 : 0);
  1263. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1264. DATA, UCAST,
  1265. (htt_tlv_filter->md_data_filter &
  1266. FILTER_DATA_UCAST) ? 1 : 0);
  1267. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MD,
  1268. DATA, NULL,
  1269. (htt_tlv_filter->md_data_filter &
  1270. FILTER_DATA_NULL) ? 1 : 0);
  1271. }
  1272. if (htt_tlv_filter->enable_mo) {
  1273. /* TYPE: CTRL */
  1274. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1275. CTRL, 1010,
  1276. (htt_tlv_filter->mo_ctrl_filter &
  1277. FILTER_CTRL_PSPOLL) ? 1 : 0);
  1278. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1279. CTRL, 1011,
  1280. (htt_tlv_filter->mo_ctrl_filter &
  1281. FILTER_CTRL_RTS) ? 1 : 0);
  1282. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1283. CTRL, 1100,
  1284. (htt_tlv_filter->mo_ctrl_filter &
  1285. FILTER_CTRL_CTS) ? 1 : 0);
  1286. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1287. CTRL, 1101,
  1288. (htt_tlv_filter->mo_ctrl_filter &
  1289. FILTER_CTRL_ACK) ? 1 : 0);
  1290. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1291. CTRL, 1110,
  1292. (htt_tlv_filter->mo_ctrl_filter &
  1293. FILTER_CTRL_CFEND) ? 1 : 0);
  1294. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1295. CTRL, 1111,
  1296. (htt_tlv_filter->mo_ctrl_filter &
  1297. FILTER_CTRL_CFEND_CFACK) ? 1 : 0);
  1298. /* TYPE: DATA */
  1299. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1300. DATA, MCAST,
  1301. (htt_tlv_filter->mo_data_filter &
  1302. FILTER_DATA_MCAST) ? 1 : 0);
  1303. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1304. DATA, UCAST,
  1305. (htt_tlv_filter->mo_data_filter &
  1306. FILTER_DATA_UCAST) ? 1 : 0);
  1307. htt_rx_ring_pkt_enable_subtype_set(*msg_word, FLAG3, MO,
  1308. DATA, NULL,
  1309. (htt_tlv_filter->mo_data_filter &
  1310. FILTER_DATA_NULL) ? 1 : 0);
  1311. }
  1312. /* word 6 */
  1313. msg_word++;
  1314. *msg_word = 0;
  1315. tlv_filter = 0;
  1316. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_START,
  1317. htt_tlv_filter->mpdu_start);
  1318. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_START,
  1319. htt_tlv_filter->msdu_start);
  1320. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET,
  1321. htt_tlv_filter->packet);
  1322. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MSDU_END,
  1323. htt_tlv_filter->msdu_end);
  1324. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, MPDU_END,
  1325. htt_tlv_filter->mpdu_end);
  1326. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PACKET_HEADER,
  1327. htt_tlv_filter->packet_header);
  1328. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, ATTENTION,
  1329. htt_tlv_filter->attention);
  1330. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_START,
  1331. htt_tlv_filter->ppdu_start);
  1332. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END,
  1333. htt_tlv_filter->ppdu_end);
  1334. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_USER_STATS,
  1335. htt_tlv_filter->ppdu_end_user_stats);
  1336. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter,
  1337. PPDU_END_USER_STATS_EXT,
  1338. htt_tlv_filter->ppdu_end_user_stats_ext);
  1339. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, PPDU_END_STATUS_DONE,
  1340. htt_tlv_filter->ppdu_end_status_done);
1341. /* RESERVED bit maps to header_per_msdu in htt_tlv_filter */
  1342. htt_rx_ring_tlv_filter_in_enable_set(tlv_filter, RESERVED,
  1343. htt_tlv_filter->header_per_msdu);
  1344. HTT_RX_RING_SELECTION_CFG_TLV_FILTER_IN_FLAG_SET(*msg_word, tlv_filter);
  1345. msg_word++;
  1346. *msg_word = 0;
  1347. if (htt_tlv_filter->offset_valid) {
  1348. HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET_SET(*msg_word,
  1349. htt_tlv_filter->rx_packet_offset);
  1350. HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET_SET(*msg_word,
  1351. htt_tlv_filter->rx_header_offset);
  1352. msg_word++;
  1353. *msg_word = 0;
  1354. HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET_SET(*msg_word,
  1355. htt_tlv_filter->rx_mpdu_end_offset);
  1356. HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET_SET(*msg_word,
  1357. htt_tlv_filter->rx_mpdu_start_offset);
  1358. msg_word++;
  1359. *msg_word = 0;
  1360. HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET_SET(*msg_word,
  1361. htt_tlv_filter->rx_msdu_end_offset);
  1362. HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET_SET(*msg_word,
  1363. htt_tlv_filter->rx_msdu_start_offset);
  1364. msg_word++;
  1365. *msg_word = 0;
  1366. HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET_SET(*msg_word,
  1367. htt_tlv_filter->rx_attn_offset);
  1368. msg_word++;
  1369. *msg_word = 0;
  1370. } else {
  1371. msg_word += 4;
  1372. *msg_word = 0;
  1373. }
  1374. if (mon_drop_th > 0)
  1375. HTT_RX_RING_SELECTION_CFG_RX_DROP_THRESHOLD_SET(*msg_word,
  1376. mon_drop_th);
  1377. /* "response_required" field should be set if a HTT response message is
  1378. * required after setting up the ring.
  1379. */
  1380. pkt = htt_htc_pkt_alloc(soc);
  1381. if (!pkt)
  1382. goto fail1;
  1383. pkt->soc_ctxt = NULL; /* not used during send-done callback */
  1384. SET_HTC_PACKET_INFO_TX(
  1385. &pkt->htc_pkt,
  1386. dp_htt_h2t_send_complete_free_netbuf,
  1387. qdf_nbuf_data(htt_msg),
  1388. qdf_nbuf_len(htt_msg),
  1389. soc->htc_endpoint,
  1390. HTC_TX_PACKET_TAG_RUNTIME_PUT); /* tag for no FW response msg */
  1391. SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
  1392. status = DP_HTT_SEND_HTC_PKT(soc, pkt,
  1393. HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
  1394. htt_logger_bufp);
  1395. if (status != QDF_STATUS_SUCCESS) {
  1396. qdf_nbuf_free(htt_msg);
  1397. htt_htc_pkt_free(soc, pkt);
  1398. }
  1399. return status;
  1400. fail1:
  1401. qdf_nbuf_free(htt_msg);
  1402. fail0:
  1403. return QDF_STATUS_E_FAILURE;
  1404. }
  1405. qdf_export_symbol(htt_h2t_rx_ring_cfg);
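/*
 * Illustrative usage sketch (not compiled into the driver): one way a
 * monitor-mode caller might populate the TLV filter that
 * htt_h2t_rx_ring_cfg() encodes into the RX_RING_SELECTION_CFG message
 * above. Field and flag names are taken from the code in this function;
 * the htt_h2t_rx_ring_cfg() prototype, the ring handle, the buffer size
 * macro and the FILTER_x_ALL convenience masks are assumptions made for
 * the sake of the example - substitute the individual FILTER_MGMT_x,
 * FILTER_CTRL_x and FILTER_DATA_x bits if such masks are not available.
 *
 *	struct htt_rx_ring_tlv_filter tlv_filter = {0};
 *
 *	tlv_filter.ppdu_start = 1;
 *	tlv_filter.ppdu_end = 1;
 *	tlv_filter.ppdu_end_user_stats = 1;
 *	tlv_filter.ppdu_end_user_stats_ext = 1;
 *	tlv_filter.ppdu_end_status_done = 1;
 *	tlv_filter.enable_fp = 1;
 *	tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
 *	tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
 *	tlv_filter.fp_data_filter = FILTER_DATA_ALL;
 *	tlv_filter.offset_valid = false;
 *
 *	htt_h2t_rx_ring_cfg(soc->htt_handle, pdev_id, hal_ring_hdl,
 *			    RXDMA_MONITOR_STATUS, RX_MON_STATUS_BUF_SIZE,
 *			    &tlv_filter);
 */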
  1406. #if defined(HTT_STATS_ENABLE)
  1407. static inline QDF_STATUS dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
  1408. struct dp_soc *soc, qdf_nbuf_t htt_msg)
  1409. {
  1410. uint32_t pdev_id;
  1411. uint32_t *msg_word = NULL;
  1412. uint32_t msg_remain_len = 0;
  1413. msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
1414. /* COOKIE MSB */
  1415. pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
1416. /* stats message length + 16 bytes of HTT header */
  1417. msg_remain_len = qdf_min(htt_stats->msg_len + 16,
  1418. (uint32_t)DP_EXT_MSG_LENGTH);
  1419. dp_wdi_event_handler(WDI_EVENT_HTT_STATS, soc,
  1420. msg_word, msg_remain_len,
  1421. WDI_NO_VAL, pdev_id);
  1422. if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
  1423. htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
  1424. }
1425. /* Needs to be freed here, as the WDI handler makes a copy of
1426. * the pkt to send data to the application
1427. */
  1428. qdf_nbuf_free(htt_msg);
  1429. return QDF_STATUS_SUCCESS;
  1430. }
  1431. #else
  1432. static inline QDF_STATUS
  1433. dp_send_htt_stat_resp(struct htt_stats_context *htt_stats,
  1434. struct dp_soc *soc, qdf_nbuf_t htt_msg)
  1435. {
  1436. return QDF_STATUS_E_NOSUPPORT;
  1437. }
  1438. #endif
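/*
 * Worked sketch (not compiled, values made up): how dp_send_htt_stat_resp()
 * above sizes each WDI indication. HTT_PID_BIT_MASK and DP_EXT_MSG_LENGTH
 * are the macros already used by that function; the concrete value of
 * DP_EXT_MSG_LENGTH is assumed only for illustration.
 *
 *	uint32_t pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK; // pdev id lives in the cookie MSB word
 *	uint32_t chunk;
 *
 *	// e.g. msg_len = 5000, DP_EXT_MSG_LENGTH = 2048 (illustrative):
 *	chunk = qdf_min(htt_stats->msg_len + 16, (uint32_t)DP_EXT_MSG_LENGTH); // -> 2048
 *	// remaining length carried to the next buffer: 5000 - 2048 = 2952
 */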
  1439. #ifdef HTT_STATS_DEBUGFS_SUPPORT
1440. /* dp_htt_stats_dbgfs_send_msg() - Send HTT stats data to the debugfs layer.
  1441. * @pdev: dp pdev handle
  1442. * @msg_word: HTT msg
  1443. * @msg_len: Length of HTT msg sent
  1444. *
  1445. * Return: none
  1446. */
  1447. static inline void
  1448. dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
  1449. uint32_t msg_len)
  1450. {
  1451. struct htt_dbgfs_cfg dbgfs_cfg;
  1452. int done = 0;
  1453. /* send 5th word of HTT msg to upper layer */
  1454. dbgfs_cfg.msg_word = (msg_word + 4);
  1455. dbgfs_cfg.m = pdev->dbgfs_cfg->m;
1456. /* stats message length + HTT_HEADER_LEN (16 bytes) */
  1457. msg_len = qdf_min(msg_len + HTT_HEADER_LEN, (uint32_t)DP_EXT_MSG_LENGTH);
  1458. if (pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process)
  1459. pdev->dbgfs_cfg->htt_stats_dbgfs_msg_process(&dbgfs_cfg,
  1460. (msg_len - HTT_HEADER_LEN));
  1461. /* Get TLV Done bit from 4th msg word */
  1462. done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
  1463. if (done) {
  1464. if (qdf_event_set(&pdev->dbgfs_cfg->htt_stats_dbgfs_event))
  1465. dp_htt_err("%pK: Failed to set event for debugfs htt stats"
  1466. , pdev->soc);
  1467. }
  1468. }
  1469. #else
  1470. static inline void
  1471. dp_htt_stats_dbgfs_send_msg(struct dp_pdev *pdev, uint32_t *msg_word,
  1472. uint32_t msg_len)
  1473. {
  1474. }
  1475. #endif /* HTT_STATS_DEBUGFS_SUPPORT */
  1476. #ifdef WLAN_SYSFS_DP_STATS
1477. /* dp_htt_stats_sysfs_update_config() - Update the sysfs config for HTT stats printing.
  1478. * @pdev: dp pdev handle
  1479. *
1480. * This function sets the process id and printing mode within the sysfs config
1481. * struct, which enables DP_PRINT statements within this process to write to the
1482. * console buffer provided by user space.
  1483. *
  1484. * Return: None
  1485. */
  1486. static inline void
  1487. dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
  1488. {
  1489. struct dp_soc *soc = pdev->soc;
  1490. if (!soc) {
  1491. dp_htt_err("soc is null");
  1492. return;
  1493. }
  1494. if (!soc->sysfs_config) {
  1495. dp_htt_err("soc->sysfs_config is NULL");
  1496. return;
  1497. }
  1498. /* set sysfs config parameters */
  1499. soc->sysfs_config->process_id = qdf_get_current_pid();
  1500. soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
  1501. }
  1502. /*
  1503. * dp_htt_stats_sysfs_set_event() - Set sysfs stats event.
  1504. * @soc: soc handle.
  1505. * @msg_word: Pointer to htt msg word.
  1506. *
1507. * Return: void
  1508. */
  1509. static inline void
  1510. dp_htt_stats_sysfs_set_event(struct dp_soc *soc, uint32_t *msg_word)
  1511. {
  1512. int done = 0;
  1513. done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*(msg_word + 3));
  1514. if (done) {
  1515. if (qdf_event_set(&soc->sysfs_config->sysfs_txrx_fw_request_done))
  1516. dp_htt_err("%pK:event compl Fail to set event ",
  1517. soc);
  1518. }
  1519. }
  1520. #else /* WLAN_SYSFS_DP_STATS */
  1521. static inline void
  1522. dp_htt_stats_sysfs_update_config(struct dp_pdev *pdev)
  1523. {
  1524. }
  1525. static inline void
  1526. dp_htt_stats_sysfs_set_event(struct dp_soc *dp_soc, uint32_t *msg_word)
  1527. {
  1528. }
  1529. #endif /* WLAN_SYSFS_DP_STATS */
  1530. /**
  1531. * dp_process_htt_stat_msg(): Process the list of buffers of HTT EXT stats
1532. * @htt_stats: htt stats info
* @soc: DP SOC handle
  1533. *
  1534. * The FW sends the HTT EXT STATS as a stream of T2H messages. Each T2H message
  1535. * contains sub messages which are identified by a TLV header.
1536. * In this function we will process the stream of T2H messages and read all the
1537. * TLVs contained in the message.
1538. *
1539. * The following cases have been taken care of:
1540. * Case 1: When the tlv_remain_length <= msg_remain_length of the HTT MSG buffer.
1541. * In this case the buffer will contain multiple TLVs.
1542. * Case 2: When the tlv_remain_length > msg_remain_length of the HTT MSG buffer.
1543. * Only one TLV will be contained in the HTT message and this TLV
1544. * will extend onto the next buffer.
1545. * Case 3: When the buffer is the continuation of the previous message.
1546. * Case 4: When the TLV length is 0, which indicates the end of the message.
1547. *
1548. * Return: void
  1549. */
  1550. static inline void dp_process_htt_stat_msg(struct htt_stats_context *htt_stats,
  1551. struct dp_soc *soc)
  1552. {
  1553. htt_tlv_tag_t tlv_type = 0xff;
  1554. qdf_nbuf_t htt_msg = NULL;
  1555. uint32_t *msg_word;
  1556. uint8_t *tlv_buf_head = NULL;
  1557. uint8_t *tlv_buf_tail = NULL;
  1558. uint32_t msg_remain_len = 0;
  1559. uint32_t tlv_remain_len = 0;
  1560. uint32_t *tlv_start;
  1561. int cookie_val = 0;
  1562. int cookie_msb = 0;
  1563. int pdev_id;
  1564. bool copy_stats = false;
  1565. struct dp_pdev *pdev;
  1566. /* Process node in the HTT message queue */
  1567. while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
  1568. != NULL) {
  1569. msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
  1570. cookie_val = *(msg_word + 1);
  1571. htt_stats->msg_len = HTT_T2H_EXT_STATS_CONF_TLV_LENGTH_GET(
  1572. *(msg_word +
  1573. HTT_T2H_EXT_STATS_TLV_START_OFFSET));
  1574. if (cookie_val) {
  1575. if (dp_send_htt_stat_resp(htt_stats, soc, htt_msg)
  1576. == QDF_STATUS_SUCCESS) {
  1577. continue;
  1578. }
  1579. }
  1580. cookie_msb = *(msg_word + 2);
  1581. pdev_id = *(msg_word + 2) & HTT_PID_BIT_MASK;
  1582. pdev = soc->pdev_list[pdev_id];
  1583. if (!cookie_val && (cookie_msb & DBG_STATS_COOKIE_HTT_DBGFS)) {
  1584. dp_htt_stats_dbgfs_send_msg(pdev, msg_word,
  1585. htt_stats->msg_len);
  1586. qdf_nbuf_free(htt_msg);
  1587. continue;
  1588. }
  1589. if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
  1590. dp_htt_stats_sysfs_update_config(pdev);
  1591. if (cookie_msb & DBG_STATS_COOKIE_DP_STATS)
  1592. copy_stats = true;
  1593. /* read 5th word */
  1594. msg_word = msg_word + 4;
  1595. msg_remain_len = qdf_min(htt_stats->msg_len,
  1596. (uint32_t) DP_EXT_MSG_LENGTH);
  1597. /* Keep processing the node till node length is 0 */
  1598. while (msg_remain_len) {
  1599. /*
  1600. * if message is not a continuation of previous message
  1601. * read the tlv type and tlv length
  1602. */
  1603. if (!tlv_buf_head) {
  1604. tlv_type = HTT_STATS_TLV_TAG_GET(
  1605. *msg_word);
  1606. tlv_remain_len = HTT_STATS_TLV_LENGTH_GET(
  1607. *msg_word);
  1608. }
  1609. if (tlv_remain_len == 0) {
  1610. msg_remain_len = 0;
  1611. if (tlv_buf_head) {
  1612. qdf_mem_free(tlv_buf_head);
  1613. tlv_buf_head = NULL;
  1614. tlv_buf_tail = NULL;
  1615. }
  1616. goto error;
  1617. }
  1618. if (!tlv_buf_head)
  1619. tlv_remain_len += HTT_TLV_HDR_LEN;
  1620. if ((tlv_remain_len <= msg_remain_len)) {
  1621. /* Case 3 */
  1622. if (tlv_buf_head) {
  1623. qdf_mem_copy(tlv_buf_tail,
  1624. (uint8_t *)msg_word,
  1625. tlv_remain_len);
  1626. tlv_start = (uint32_t *)tlv_buf_head;
  1627. } else {
  1628. /* Case 1 */
  1629. tlv_start = msg_word;
  1630. }
  1631. if (copy_stats)
  1632. dp_htt_stats_copy_tag(pdev,
  1633. tlv_type,
  1634. tlv_start);
  1635. else
  1636. dp_htt_stats_print_tag(pdev,
  1637. tlv_type,
  1638. tlv_start);
  1639. if (tlv_type == HTT_STATS_PEER_DETAILS_TAG ||
  1640. tlv_type == HTT_STATS_PEER_STATS_CMN_TAG)
  1641. dp_peer_update_inactive_time(pdev,
  1642. tlv_type,
  1643. tlv_start);
  1644. msg_remain_len -= tlv_remain_len;
  1645. msg_word = (uint32_t *)
  1646. (((uint8_t *)msg_word) +
  1647. tlv_remain_len);
  1648. tlv_remain_len = 0;
  1649. if (tlv_buf_head) {
  1650. qdf_mem_free(tlv_buf_head);
  1651. tlv_buf_head = NULL;
  1652. tlv_buf_tail = NULL;
  1653. }
  1654. } else { /* tlv_remain_len > msg_remain_len */
  1655. /* Case 2 & 3 */
  1656. if (!tlv_buf_head) {
  1657. tlv_buf_head = qdf_mem_malloc(
  1658. tlv_remain_len);
  1659. if (!tlv_buf_head) {
  1660. QDF_TRACE(QDF_MODULE_ID_TXRX,
  1661. QDF_TRACE_LEVEL_ERROR,
  1662. "Alloc failed");
  1663. goto error;
  1664. }
  1665. tlv_buf_tail = tlv_buf_head;
  1666. }
  1667. qdf_mem_copy(tlv_buf_tail, (uint8_t *)msg_word,
  1668. msg_remain_len);
  1669. tlv_remain_len -= msg_remain_len;
  1670. tlv_buf_tail += msg_remain_len;
  1671. }
  1672. }
  1673. if (htt_stats->msg_len >= DP_EXT_MSG_LENGTH) {
  1674. htt_stats->msg_len -= DP_EXT_MSG_LENGTH;
  1675. }
  1676. /* indicate event completion in case the event is done */
  1677. if (!cookie_val && (cookie_msb & DBG_SYSFS_STATS_COOKIE))
  1678. dp_htt_stats_sysfs_set_event(soc, msg_word);
  1679. qdf_nbuf_free(htt_msg);
  1680. }
  1681. return;
  1682. error:
  1683. qdf_nbuf_free(htt_msg);
  1684. while ((htt_msg = qdf_nbuf_queue_remove(&htt_stats->msg))
  1685. != NULL)
  1686. qdf_nbuf_free(htt_msg);
  1687. }
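/*
 * Parsing sketch (not compiled): how a single TLV in the stats stream is
 * interpreted, matching the loop in dp_process_htt_stat_msg() above.
 * HTT_STATS_TLV_TAG_GET(), HTT_STATS_TLV_LENGTH_GET() and HTT_TLV_HDR_LEN
 * are the macros already used there; process_tlv() and buffer_partial_tlv()
 * are hypothetical helpers standing in for the print/copy and spill-over
 * handling done inline above.
 *
 *	uint32_t hdr   = *msg_word;
 *	uint16_t tag   = HTT_STATS_TLV_TAG_GET(hdr);
 *	uint16_t len   = HTT_STATS_TLV_LENGTH_GET(hdr);  // payload length, header excluded
 *	uint32_t total = len + HTT_TLV_HDR_LEN;          // header included
 *
 *	if (total <= msg_remain_len)
 *		process_tlv(tag, msg_word);                   // Case 1: whole TLV in this buffer
 *	else
 *		buffer_partial_tlv(msg_word, msg_remain_len); // Case 2: TLV spills into next buffer
 */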
  1688. void htt_t2h_stats_handler(void *context)
  1689. {
  1690. struct dp_soc *soc = (struct dp_soc *)context;
  1691. struct htt_stats_context htt_stats;
  1692. uint32_t *msg_word;
  1693. qdf_nbuf_t htt_msg = NULL;
  1694. uint8_t done;
  1695. uint32_t rem_stats;
  1696. if (!soc) {
  1697. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1698. "soc is NULL");
  1699. return;
  1700. }
  1701. if (!qdf_atomic_read(&soc->cmn_init_done)) {
  1702. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1703. "soc: 0x%pK, init_done: %d", soc,
  1704. qdf_atomic_read(&soc->cmn_init_done));
  1705. return;
  1706. }
  1707. qdf_mem_zero(&htt_stats, sizeof(htt_stats));
  1708. qdf_nbuf_queue_init(&htt_stats.msg);
1709. /* pull one completed stats message set from soc->htt_stats.msg and process it */
  1710. qdf_spin_lock_bh(&soc->htt_stats.lock);
  1711. if (!soc->htt_stats.num_stats) {
  1712. qdf_spin_unlock_bh(&soc->htt_stats.lock);
  1713. return;
  1714. }
  1715. while ((htt_msg = qdf_nbuf_queue_remove(&soc->htt_stats.msg)) != NULL) {
  1716. msg_word = (uint32_t *) qdf_nbuf_data(htt_msg);
  1717. msg_word = msg_word + HTT_T2H_EXT_STATS_TLV_START_OFFSET;
  1718. done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
  1719. qdf_nbuf_queue_add(&htt_stats.msg, htt_msg);
  1720. /*
  1721. * Done bit signifies that this is the last T2H buffer in the
  1722. * stream of HTT EXT STATS message
  1723. */
  1724. if (done)
  1725. break;
  1726. }
  1727. rem_stats = --soc->htt_stats.num_stats;
  1728. qdf_spin_unlock_bh(&soc->htt_stats.lock);
1729. /* If there are more stats to process, schedule the stats work again.
1730. * Scheduling before processing htt_stats queues the work with an
1731. * earlier index.
1732. */
  1733. if (rem_stats)
  1734. qdf_sched_work(0, &soc->htt_stats.work);
  1735. dp_process_htt_stat_msg(&htt_stats, soc);
  1736. }
  1737. /**
  1738. * dp_txrx_fw_stats_handler() - Function to process HTT EXT stats
  1739. * @soc: DP SOC handle
  1740. * @htt_t2h_msg: HTT message nbuf
  1741. *
1742. * Return: void
  1743. */
  1744. static inline void dp_txrx_fw_stats_handler(struct dp_soc *soc,
  1745. qdf_nbuf_t htt_t2h_msg)
  1746. {
  1747. uint8_t done;
  1748. qdf_nbuf_t msg_copy;
  1749. uint32_t *msg_word;
  1750. msg_word = (uint32_t *)qdf_nbuf_data(htt_t2h_msg);
  1751. msg_word = msg_word + 3;
  1752. done = HTT_T2H_EXT_STATS_CONF_TLV_DONE_GET(*msg_word);
  1753. /*
  1754. * HTT EXT stats response comes as stream of TLVs which span over
  1755. * multiple T2H messages.
  1756. * The first message will carry length of the response.
  1757. * For rest of the messages length will be zero.
  1758. *
  1759. * Clone the T2H message buffer and store it in a list to process
  1760. * it later.
  1761. *
1762. * The original T2H message buffer gets freed in the T2H HTT event
1763. * handler.
  1764. */
  1765. msg_copy = qdf_nbuf_clone(htt_t2h_msg);
  1766. if (!msg_copy) {
  1767. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
  1768. "T2H messge clone failed for HTT EXT STATS");
  1769. goto error;
  1770. }
  1771. qdf_spin_lock_bh(&soc->htt_stats.lock);
  1772. qdf_nbuf_queue_add(&soc->htt_stats.msg, msg_copy);
  1773. /*
  1774. * Done bit signifies that this is the last T2H buffer in the stream of
  1775. * HTT EXT STATS message
  1776. */
  1777. if (done) {
  1778. soc->htt_stats.num_stats++;
  1779. qdf_sched_work(0, &soc->htt_stats.work);
  1780. }
  1781. qdf_spin_unlock_bh(&soc->htt_stats.lock);
  1782. return;
  1783. error:
  1784. qdf_spin_lock_bh(&soc->htt_stats.lock);
  1785. while ((msg_copy = qdf_nbuf_queue_remove(&soc->htt_stats.msg))
  1786. != NULL) {
  1787. qdf_nbuf_free(msg_copy);
  1788. }
  1789. soc->htt_stats.num_stats = 0;
  1790. qdf_spin_unlock_bh(&soc->htt_stats.lock);
  1791. return;
  1792. }
  1793. /*
  1794. * htt_soc_attach_target() - SOC level HTT setup
  1795. * @htt_soc: HTT SOC handle
  1796. *
  1797. * Return: 0 on success; error code on failure
  1798. */
  1799. int htt_soc_attach_target(struct htt_soc *htt_soc)
  1800. {
  1801. struct htt_soc *soc = (struct htt_soc *)htt_soc;
  1802. return htt_h2t_ver_req_msg(soc);
  1803. }
  1804. void htt_set_htc_handle(struct htt_soc *htt_soc, HTC_HANDLE htc_soc)
  1805. {
  1806. htt_soc->htc_soc = htc_soc;
  1807. }
  1808. HTC_HANDLE htt_get_htc_handle(struct htt_soc *htt_soc)
  1809. {
  1810. return htt_soc->htc_soc;
  1811. }
  1812. struct htt_soc *htt_soc_attach(struct dp_soc *soc, HTC_HANDLE htc_handle)
  1813. {
  1814. int i;
  1815. int j;
  1816. int alloc_size = HTT_SW_UMAC_RING_IDX_MAX * sizeof(unsigned long);
  1817. struct htt_soc *htt_soc = NULL;
  1818. htt_soc = qdf_mem_malloc(sizeof(*htt_soc));
  1819. if (!htt_soc) {
  1820. dp_err("HTT attach failed");
  1821. return NULL;
  1822. }
  1823. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1824. htt_soc->pdevid_tt[i].umac_ttt = qdf_mem_malloc(alloc_size);
  1825. if (!htt_soc->pdevid_tt[i].umac_ttt)
  1826. break;
  1827. qdf_mem_set(htt_soc->pdevid_tt[i].umac_ttt, alloc_size, -1);
  1828. htt_soc->pdevid_tt[i].lmac_ttt = qdf_mem_malloc(alloc_size);
  1829. if (!htt_soc->pdevid_tt[i].lmac_ttt) {
  1830. qdf_mem_free(htt_soc->pdevid_tt[i].umac_ttt);
  1831. break;
  1832. }
  1833. qdf_mem_set(htt_soc->pdevid_tt[i].lmac_ttt, alloc_size, -1);
  1834. }
  1835. if (i != MAX_PDEV_CNT) {
  1836. for (j = 0; j < i; j++) {
  1837. qdf_mem_free(htt_soc->pdevid_tt[j].umac_ttt);
  1838. qdf_mem_free(htt_soc->pdevid_tt[j].lmac_ttt);
  1839. }
  1840. qdf_mem_free(htt_soc);
  1841. return NULL;
  1842. }
  1843. htt_soc->dp_soc = soc;
  1844. htt_soc->htc_soc = htc_handle;
  1845. HTT_TX_MUTEX_INIT(&htt_soc->htt_tx_mutex);
  1846. return htt_soc;
  1847. }
  1848. #if defined(WDI_EVENT_ENABLE) && \
  1849. !defined(REMOVE_PKT_LOG)
  1850. /*
  1851. * dp_pktlog_msg_handler() - Pktlog msg handler
  1852. * @htt_soc: HTT SOC handle
  1853. * @msg_word: Pointer to payload
  1854. *
  1855. * Return: None
  1856. */
  1857. static void
  1858. dp_pktlog_msg_handler(struct htt_soc *soc,
  1859. uint32_t *msg_word)
  1860. {
  1861. uint8_t pdev_id;
  1862. uint8_t target_pdev_id;
  1863. uint32_t *pl_hdr;
  1864. target_pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
  1865. pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
  1866. target_pdev_id);
  1867. pl_hdr = (msg_word + 1);
  1868. dp_wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
  1869. pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL,
  1870. pdev_id);
  1871. }
  1872. #else
  1873. static void
  1874. dp_pktlog_msg_handler(struct htt_soc *soc,
  1875. uint32_t *msg_word)
  1876. {
  1877. }
  1878. #endif
  1879. /*
  1880. * time_allow_print() - time allow print
1881. * @htt_ring_tt: ring_id-indexed array of timestamps
1882. * @ring_id: ring_id (index)
1883. *
1884. * Return: 1 if the timestamp was saved in the array (printing allowed),
1885. * 0 if the timestamp falls within 2 seconds of the last one
  1886. */
  1887. static bool time_allow_print(unsigned long *htt_ring_tt, u_int8_t ring_id)
  1888. {
  1889. unsigned long tstamp;
  1890. unsigned long delta;
  1891. tstamp = qdf_get_system_timestamp();
  1892. if (!htt_ring_tt)
1893. return 0; /* unable to print backpressure messages */
  1894. if (htt_ring_tt[ring_id] == -1) {
  1895. htt_ring_tt[ring_id] = tstamp;
  1896. return 1;
  1897. }
  1898. delta = tstamp - htt_ring_tt[ring_id];
  1899. if (delta >= 2000) {
  1900. htt_ring_tt[ring_id] = tstamp;
  1901. return 1;
  1902. }
  1903. return 0;
  1904. }
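/*
 * Usage sketch (not compiled): how the per-ring timestamp table gates the
 * backpressure prints, mirroring the callers in dp_htt_bkp_event_alert()
 * further below. radio_tt is the per-pdev dp_htt_timestamp entry; repeated
 * alerts for the same ring_id within 2000 ms are suppressed.
 *
 *	if (time_allow_print(radio_tt->umac_ttt, ring_id))
 *		dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
 *				   bkp_time, "HTT_SW_RING_TYPE_UMAC");
 */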
  1905. static void dp_htt_alert_print(enum htt_t2h_msg_type msg_type,
  1906. struct dp_pdev *pdev, u_int8_t ring_id,
  1907. u_int16_t hp_idx, u_int16_t tp_idx,
  1908. u_int32_t bkp_time, char *ring_stype)
  1909. {
  1910. dp_alert("seq_num %u msg_type: %d pdev_id: %d ring_type: %s ",
  1911. pdev->bkp_stats.seq_num, msg_type, pdev->pdev_id, ring_stype);
  1912. dp_alert("ring_id: %d hp_idx: %d tp_idx: %d bkpressure_time_ms: %d ",
  1913. ring_id, hp_idx, tp_idx, bkp_time);
  1914. }
  1915. /**
  1916. * dp_get_srng_ring_state_from_hal(): Get hal level ring stats
1917. * @soc: DP_SOC handle
* @pdev: DP_PDEV handle
1918. * @srng: DP_SRNG handle
1919. * @ring_type: srng src/dst ring
* @state: ring state to be filled
1920. *
1921. * Return: QDF_STATUS
  1922. */
  1923. static QDF_STATUS
  1924. dp_get_srng_ring_state_from_hal(struct dp_soc *soc,
  1925. struct dp_pdev *pdev,
  1926. struct dp_srng *srng,
  1927. enum hal_ring_type ring_type,
  1928. struct dp_srng_ring_state *state)
  1929. {
  1930. struct hal_soc *hal_soc;
  1931. if (!soc || !srng || !srng->hal_srng || !state)
  1932. return QDF_STATUS_E_INVAL;
  1933. hal_soc = (struct hal_soc *)soc->hal_soc;
  1934. hal_get_sw_hptp(soc->hal_soc, srng->hal_srng, &state->sw_tail,
  1935. &state->sw_head);
  1936. hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &state->hw_head,
  1937. &state->hw_tail, ring_type);
  1938. state->ring_type = ring_type;
  1939. return QDF_STATUS_SUCCESS;
  1940. }
  1941. #ifdef QCA_MONITOR_PKT_SUPPORT
  1942. static void
  1943. dp_queue_mon_ring_stats(struct dp_pdev *pdev,
  1944. int lmac_id, uint32_t *num_srng,
  1945. struct dp_soc_srngs_state *soc_srngs_state)
  1946. {
  1947. QDF_STATUS status;
  1948. if (pdev->soc->wlan_cfg_ctx->rxdma1_enable) {
  1949. status = dp_get_srng_ring_state_from_hal
  1950. (pdev->soc, pdev,
  1951. &pdev->soc->rxdma_mon_buf_ring[lmac_id],
  1952. RXDMA_MONITOR_BUF,
  1953. &soc_srngs_state->ring_state[*num_srng]);
  1954. if (status == QDF_STATUS_SUCCESS)
  1955. qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
  1956. status = dp_get_srng_ring_state_from_hal
  1957. (pdev->soc, pdev,
  1958. &pdev->soc->rxdma_mon_dst_ring[lmac_id],
  1959. RXDMA_MONITOR_DST,
  1960. &soc_srngs_state->ring_state[*num_srng]);
  1961. if (status == QDF_STATUS_SUCCESS)
  1962. qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
  1963. status = dp_get_srng_ring_state_from_hal
  1964. (pdev->soc, pdev,
  1965. &pdev->soc->rxdma_mon_desc_ring[lmac_id],
  1966. RXDMA_MONITOR_DESC,
  1967. &soc_srngs_state->ring_state[*num_srng]);
  1968. if (status == QDF_STATUS_SUCCESS)
  1969. qdf_assert_always(++(*num_srng) < DP_MAX_SRNGS);
  1970. }
  1971. }
  1972. #else
  1973. static void
  1974. dp_queue_mon_ring_stats(struct dp_pdev *pdev,
  1975. int lmac_id, uint32_t *num_srng,
  1976. struct dp_soc_srngs_state *soc_srngs_state)
  1977. {
  1978. }
  1979. #endif
  1980. /**
1981. * dp_queue_ring_stats(): Queue pdev hal level ring stats for the backpressure work
  1982. * @pdev: DP_pdev handle
  1983. *
  1984. * Return: void
  1985. */
  1986. static void dp_queue_ring_stats(struct dp_pdev *pdev)
  1987. {
  1988. uint32_t i;
  1989. int mac_id;
  1990. int lmac_id;
  1991. uint32_t j = 0;
1992. struct dp_soc_srngs_state *soc_srngs_state = NULL;
  1993. struct dp_soc_srngs_state *drop_srngs_state = NULL;
  1994. QDF_STATUS status;
  1995. soc_srngs_state = qdf_mem_malloc(sizeof(struct dp_soc_srngs_state));
  1996. if (!soc_srngs_state) {
  1997. dp_htt_alert("Memory alloc failed for back pressure event");
  1998. return;
  1999. }
  2000. status = dp_get_srng_ring_state_from_hal
  2001. (pdev->soc, pdev,
  2002. &pdev->soc->reo_exception_ring,
  2003. REO_EXCEPTION,
  2004. &soc_srngs_state->ring_state[j]);
  2005. if (status == QDF_STATUS_SUCCESS)
  2006. qdf_assert_always(++j < DP_MAX_SRNGS);
  2007. status = dp_get_srng_ring_state_from_hal
  2008. (pdev->soc, pdev,
  2009. &pdev->soc->reo_reinject_ring,
  2010. REO_REINJECT,
  2011. &soc_srngs_state->ring_state[j]);
  2012. if (status == QDF_STATUS_SUCCESS)
  2013. qdf_assert_always(++j < DP_MAX_SRNGS);
  2014. status = dp_get_srng_ring_state_from_hal
  2015. (pdev->soc, pdev,
  2016. &pdev->soc->reo_cmd_ring,
  2017. REO_CMD,
  2018. &soc_srngs_state->ring_state[j]);
  2019. if (status == QDF_STATUS_SUCCESS)
  2020. qdf_assert_always(++j < DP_MAX_SRNGS);
  2021. status = dp_get_srng_ring_state_from_hal
  2022. (pdev->soc, pdev,
  2023. &pdev->soc->reo_status_ring,
  2024. REO_STATUS,
  2025. &soc_srngs_state->ring_state[j]);
  2026. if (status == QDF_STATUS_SUCCESS)
  2027. qdf_assert_always(++j < DP_MAX_SRNGS);
  2028. status = dp_get_srng_ring_state_from_hal
  2029. (pdev->soc, pdev,
  2030. &pdev->soc->rx_rel_ring,
  2031. WBM2SW_RELEASE,
  2032. &soc_srngs_state->ring_state[j]);
  2033. if (status == QDF_STATUS_SUCCESS)
  2034. qdf_assert_always(++j < DP_MAX_SRNGS);
  2035. status = dp_get_srng_ring_state_from_hal
  2036. (pdev->soc, pdev,
  2037. &pdev->soc->tcl_cmd_credit_ring,
  2038. TCL_CMD_CREDIT,
  2039. &soc_srngs_state->ring_state[j]);
  2040. if (status == QDF_STATUS_SUCCESS)
  2041. qdf_assert_always(++j < DP_MAX_SRNGS);
  2042. status = dp_get_srng_ring_state_from_hal
  2043. (pdev->soc, pdev,
  2044. &pdev->soc->tcl_status_ring,
  2045. TCL_STATUS,
  2046. &soc_srngs_state->ring_state[j]);
  2047. if (status == QDF_STATUS_SUCCESS)
  2048. qdf_assert_always(++j < DP_MAX_SRNGS);
  2049. status = dp_get_srng_ring_state_from_hal
  2050. (pdev->soc, pdev,
  2051. &pdev->soc->wbm_desc_rel_ring,
  2052. SW2WBM_RELEASE,
  2053. &soc_srngs_state->ring_state[j]);
  2054. if (status == QDF_STATUS_SUCCESS)
  2055. qdf_assert_always(++j < DP_MAX_SRNGS);
  2056. for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
  2057. status = dp_get_srng_ring_state_from_hal
  2058. (pdev->soc, pdev,
  2059. &pdev->soc->reo_dest_ring[i],
  2060. REO_DST,
  2061. &soc_srngs_state->ring_state[j]);
  2062. if (status == QDF_STATUS_SUCCESS)
  2063. qdf_assert_always(++j < DP_MAX_SRNGS);
  2064. }
  2065. for (i = 0; i < pdev->soc->num_tcl_data_rings; i++) {
  2066. status = dp_get_srng_ring_state_from_hal
  2067. (pdev->soc, pdev,
  2068. &pdev->soc->tcl_data_ring[i],
  2069. TCL_DATA,
  2070. &soc_srngs_state->ring_state[j]);
  2071. if (status == QDF_STATUS_SUCCESS)
  2072. qdf_assert_always(++j < DP_MAX_SRNGS);
  2073. }
  2074. for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
  2075. status = dp_get_srng_ring_state_from_hal
  2076. (pdev->soc, pdev,
  2077. &pdev->soc->tx_comp_ring[i],
  2078. WBM2SW_RELEASE,
  2079. &soc_srngs_state->ring_state[j]);
  2080. if (status == QDF_STATUS_SUCCESS)
  2081. qdf_assert_always(++j < DP_MAX_SRNGS);
  2082. }
  2083. lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc, 0, pdev->pdev_id);
  2084. status = dp_get_srng_ring_state_from_hal
  2085. (pdev->soc, pdev,
  2086. &pdev->soc->rx_refill_buf_ring
  2087. [lmac_id],
  2088. RXDMA_BUF,
  2089. &soc_srngs_state->ring_state[j]);
  2090. if (status == QDF_STATUS_SUCCESS)
  2091. qdf_assert_always(++j < DP_MAX_SRNGS);
  2092. status = dp_get_srng_ring_state_from_hal
  2093. (pdev->soc, pdev,
  2094. &pdev->rx_refill_buf_ring2,
  2095. RXDMA_BUF,
  2096. &soc_srngs_state->ring_state[j]);
  2097. if (status == QDF_STATUS_SUCCESS)
  2098. qdf_assert_always(++j < DP_MAX_SRNGS);
  2099. for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2100. status = dp_get_srng_ring_state_from_hal
  2101. (pdev->soc, pdev,
  2102. &pdev->rx_mac_buf_ring[i],
  2103. RXDMA_BUF,
  2104. &soc_srngs_state->ring_state[j]);
  2105. if (status == QDF_STATUS_SUCCESS)
  2106. qdf_assert_always(++j < DP_MAX_SRNGS);
  2107. }
  2108. for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
  2109. lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
  2110. mac_id, pdev->pdev_id);
  2111. dp_queue_mon_ring_stats(pdev, lmac_id, &j,
  2112. soc_srngs_state);
  2113. status = dp_get_srng_ring_state_from_hal
  2114. (pdev->soc, pdev,
  2115. &pdev->soc->rxdma_mon_status_ring[lmac_id],
  2116. RXDMA_MONITOR_STATUS,
  2117. &soc_srngs_state->ring_state[j]);
  2118. if (status == QDF_STATUS_SUCCESS)
  2119. qdf_assert_always(++j < DP_MAX_SRNGS);
  2120. }
  2121. for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
  2122. lmac_id = dp_get_lmac_id_for_pdev_id(pdev->soc,
  2123. i, pdev->pdev_id);
  2124. status = dp_get_srng_ring_state_from_hal
  2125. (pdev->soc, pdev,
  2126. &pdev->soc->rxdma_err_dst_ring
  2127. [lmac_id],
  2128. RXDMA_DST,
  2129. &soc_srngs_state->ring_state[j]);
  2130. if (status == QDF_STATUS_SUCCESS)
  2131. qdf_assert_always(++j < DP_MAX_SRNGS);
  2132. }
  2133. soc_srngs_state->max_ring_id = j;
  2134. qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
  2135. soc_srngs_state->seq_num = pdev->bkp_stats.seq_num;
  2136. if (pdev->bkp_stats.queue_depth >= HTT_BKP_STATS_MAX_QUEUE_DEPTH) {
  2137. drop_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
  2138. qdf_assert_always(drop_srngs_state);
  2139. TAILQ_REMOVE(&pdev->bkp_stats.list, drop_srngs_state,
  2140. list_elem);
  2141. qdf_mem_free(drop_srngs_state);
  2142. pdev->bkp_stats.queue_depth--;
  2143. }
  2144. pdev->bkp_stats.queue_depth++;
  2145. TAILQ_INSERT_TAIL(&pdev->bkp_stats.list, soc_srngs_state,
  2146. list_elem);
  2147. pdev->bkp_stats.seq_num++;
  2148. qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
  2149. qdf_queue_work(0, pdev->bkp_stats.work_queue,
  2150. &pdev->bkp_stats.work);
  2151. }
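/*
 * Consumer sketch (not compiled, hypothetical): the work item queued above
 * (pdev->bkp_stats.work) is assumed to drain pdev->bkp_stats.list under
 * list_lock and report each captured ring state; the real handler lives
 * elsewhere in the DP code and print_ring_state() is a made-up helper.
 *
 *	struct dp_soc_srngs_state *state;
 *	uint32_t i;
 *
 *	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
 *	while ((state = TAILQ_FIRST(&pdev->bkp_stats.list))) {
 *		TAILQ_REMOVE(&pdev->bkp_stats.list, state, list_elem);
 *		pdev->bkp_stats.queue_depth--;
 *		qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
 *
 *		for (i = 0; i < state->max_ring_id; i++)
 *			print_ring_state(&state->ring_state[i]);
 *
 *		qdf_mem_free(state);
 *		qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
 *	}
 *	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
 */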
  2152. /*
  2153. * dp_htt_bkp_event_alert() - htt backpressure event alert
2154. * @msg_word: Pointer to the HTT backpressure message payload
2155. * @htt_soc: HTT SOC handle
2156. *
2157. * Return: None, after attempting to print and queue the ring stats
  2158. */
  2159. static void dp_htt_bkp_event_alert(u_int32_t *msg_word, struct htt_soc *soc)
  2160. {
  2161. u_int8_t ring_type;
  2162. u_int8_t pdev_id;
  2163. uint8_t target_pdev_id;
  2164. u_int8_t ring_id;
  2165. u_int16_t hp_idx;
  2166. u_int16_t tp_idx;
  2167. u_int32_t bkp_time;
  2168. enum htt_t2h_msg_type msg_type;
  2169. struct dp_soc *dpsoc;
  2170. struct dp_pdev *pdev;
  2171. struct dp_htt_timestamp *radio_tt;
  2172. if (!soc)
  2173. return;
  2174. dpsoc = (struct dp_soc *)soc->dp_soc;
  2175. msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
  2176. ring_type = HTT_T2H_RX_BKPRESSURE_RING_TYPE_GET(*msg_word);
  2177. target_pdev_id = HTT_T2H_RX_BKPRESSURE_PDEV_ID_GET(*msg_word);
  2178. pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
  2179. target_pdev_id);
  2180. if (pdev_id >= MAX_PDEV_CNT) {
  2181. dp_htt_debug("%pK: pdev id %d is invalid", soc, pdev_id);
  2182. return;
  2183. }
  2184. pdev = (struct dp_pdev *)dpsoc->pdev_list[pdev_id];
  2185. ring_id = HTT_T2H_RX_BKPRESSURE_RINGID_GET(*msg_word);
  2186. hp_idx = HTT_T2H_RX_BKPRESSURE_HEAD_IDX_GET(*(msg_word + 1));
  2187. tp_idx = HTT_T2H_RX_BKPRESSURE_TAIL_IDX_GET(*(msg_word + 1));
  2188. bkp_time = HTT_T2H_RX_BKPRESSURE_TIME_MS_GET(*(msg_word + 2));
  2189. radio_tt = &soc->pdevid_tt[pdev_id];
  2190. switch (ring_type) {
  2191. case HTT_SW_RING_TYPE_UMAC:
  2192. if (!time_allow_print(radio_tt->umac_ttt, ring_id))
  2193. return;
  2194. dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
  2195. bkp_time, "HTT_SW_RING_TYPE_UMAC");
  2196. break;
  2197. case HTT_SW_RING_TYPE_LMAC:
  2198. if (!time_allow_print(radio_tt->lmac_ttt, ring_id))
  2199. return;
  2200. dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
  2201. bkp_time, "HTT_SW_RING_TYPE_LMAC");
  2202. break;
  2203. default:
  2204. dp_htt_alert_print(msg_type, pdev, ring_id, hp_idx, tp_idx,
  2205. bkp_time, "UNKNOWN");
  2206. break;
  2207. }
  2208. dp_queue_ring_stats(pdev);
  2209. }
  2210. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  2211. /*
  2212. * dp_offload_ind_handler() - offload msg handler
  2213. * @htt_soc: HTT SOC handle
  2214. * @msg_word: Pointer to payload
  2215. *
  2216. * Return: None
  2217. */
  2218. static void
  2219. dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
  2220. {
  2221. u_int8_t pdev_id;
  2222. u_int8_t target_pdev_id;
  2223. target_pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
  2224. pdev_id = dp_get_host_pdev_id_for_target_pdev_id(soc->dp_soc,
  2225. target_pdev_id);
  2226. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_OFFLOAD_TX_DATA, soc->dp_soc,
  2227. msg_word, HTT_INVALID_VDEV, WDI_NO_VAL,
  2228. pdev_id);
  2229. }
  2230. #else
  2231. static void
  2232. dp_offload_ind_handler(struct htt_soc *soc, uint32_t *msg_word)
  2233. {
  2234. }
  2235. #endif
  2236. /*
  2237. * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
  2238. * @context: Opaque context (HTT SOC handle)
  2239. * @pkt: HTC packet
  2240. */
  2241. static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
  2242. {
  2243. struct htt_soc *soc = (struct htt_soc *) context;
  2244. qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
  2245. u_int32_t *msg_word;
  2246. enum htt_t2h_msg_type msg_type;
  2247. bool free_buf = true;
  2248. /* check for successful message reception */
  2249. if (pkt->Status != QDF_STATUS_SUCCESS) {
  2250. if (pkt->Status != QDF_STATUS_E_CANCELED)
  2251. soc->stats.htc_err_cnt++;
  2252. qdf_nbuf_free(htt_t2h_msg);
  2253. return;
  2254. }
  2255. /* TODO: Check if we should pop the HTC/HTT header alignment padding */
  2256. msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
  2257. msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
  2258. htt_event_record(soc->htt_logger_handle,
  2259. msg_type, (uint8_t *)msg_word);
  2260. switch (msg_type) {
  2261. case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
  2262. {
  2263. dp_htt_bkp_event_alert(msg_word, soc);
  2264. break;
  2265. }
  2266. case HTT_T2H_MSG_TYPE_PEER_MAP:
  2267. {
  2268. u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
  2269. u_int8_t *peer_mac_addr;
  2270. u_int16_t peer_id;
  2271. u_int16_t hw_peer_id;
  2272. u_int8_t vdev_id;
  2273. u_int8_t is_wds;
  2274. struct dp_soc *dpsoc = (struct dp_soc *)soc->dp_soc;
  2275. peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
  2276. hw_peer_id =
  2277. HTT_RX_PEER_MAP_HW_PEER_ID_GET(*(msg_word+2));
  2278. vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
  2279. peer_mac_addr = htt_t2h_mac_addr_deswizzle(
  2280. (u_int8_t *) (msg_word+1),
  2281. &mac_addr_deswizzle_buf[0]);
  2282. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2283. QDF_TRACE_LEVEL_INFO,
  2284. "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
  2285. peer_id, vdev_id);
  2286. /*
  2287. * check if peer already exists for this peer_id, if so
  2288. * this peer map event is in response for a wds peer add
  2289. * wmi command sent during wds source port learning.
  2290. * in this case just add the ast entry to the existing
  2291. * peer ast_list.
  2292. */
  2293. is_wds = !!(dpsoc->peer_id_to_obj_map[peer_id]);
  2294. dp_rx_peer_map_handler(soc->dp_soc, peer_id, hw_peer_id,
  2295. vdev_id, peer_mac_addr, 0,
  2296. is_wds);
  2297. break;
  2298. }
  2299. case HTT_T2H_MSG_TYPE_PEER_UNMAP:
  2300. {
  2301. u_int16_t peer_id;
  2302. u_int8_t vdev_id;
  2303. u_int8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
  2304. peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
  2305. vdev_id = HTT_RX_PEER_UNMAP_VDEV_ID_GET(*msg_word);
  2306. dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
  2307. vdev_id, mac_addr, 0,
  2308. DP_PEER_WDS_COUNT_INVALID);
  2309. break;
  2310. }
  2311. case HTT_T2H_MSG_TYPE_SEC_IND:
  2312. {
  2313. u_int16_t peer_id;
  2314. enum cdp_sec_type sec_type;
  2315. int is_unicast;
  2316. peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
  2317. sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
  2318. is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
  2319. /* point to the first part of the Michael key */
  2320. msg_word++;
  2321. dp_rx_sec_ind_handler(
  2322. soc->dp_soc, peer_id, sec_type, is_unicast,
  2323. msg_word, msg_word + 2);
  2324. break;
  2325. }
  2326. case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
  2327. {
  2328. free_buf =
  2329. dp_monitor_ppdu_stats_ind_handler(soc,
  2330. msg_word,
  2331. htt_t2h_msg);
  2332. break;
  2333. }
  2334. case HTT_T2H_MSG_TYPE_PKTLOG:
  2335. {
  2336. dp_pktlog_msg_handler(soc, msg_word);
  2337. break;
  2338. }
  2339. case HTT_T2H_MSG_TYPE_VERSION_CONF:
  2340. {
  2341. /*
  2342. * HTC maintains runtime pm count for H2T messages that
  2343. * have a response msg from FW. This count ensures that
2344. * in case the FW does not send out the response, or the host
2345. * does not process this indication, runtime_put happens
  2346. * properly in the cleanup path.
  2347. */
  2348. if (htc_dec_return_runtime_cnt(soc->htc_soc) >= 0)
  2349. htc_pm_runtime_put(soc->htc_soc);
  2350. else
  2351. soc->stats.htt_ver_req_put_skip++;
  2352. soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
  2353. soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
  2354. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
  2355. "target uses HTT version %d.%d; host uses %d.%d",
  2356. soc->tgt_ver.major, soc->tgt_ver.minor,
  2357. HTT_CURRENT_VERSION_MAJOR,
  2358. HTT_CURRENT_VERSION_MINOR);
  2359. if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
  2360. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2361. QDF_TRACE_LEVEL_WARN,
  2362. "*** Incompatible host/target HTT versions!");
  2363. }
  2364. /* abort if the target is incompatible with the host */
  2365. qdf_assert(soc->tgt_ver.major ==
  2366. HTT_CURRENT_VERSION_MAJOR);
  2367. if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
  2368. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2369. QDF_TRACE_LEVEL_INFO_LOW,
  2370. "*** Warning: host/target HTT versions"
  2371. " are different, though compatible!");
  2372. }
  2373. break;
  2374. }
  2375. case HTT_T2H_MSG_TYPE_RX_ADDBA:
  2376. {
  2377. uint16_t peer_id;
  2378. uint8_t tid;
  2379. uint8_t win_sz;
  2380. uint16_t status;
  2381. struct dp_peer *peer;
  2382. /*
  2383. * Update REO Queue Desc with new values
  2384. */
  2385. peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
  2386. tid = HTT_RX_ADDBA_TID_GET(*msg_word);
  2387. win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
  2388. peer = dp_peer_get_ref_by_id(soc->dp_soc, peer_id,
  2389. DP_MOD_ID_HTT);
  2390. /*
  2391. * Window size needs to be incremented by 1
  2392. * since fw needs to represent a value of 256
  2393. * using just 8 bits
  2394. */
  2395. if (peer) {
  2396. status = dp_addba_requestprocess_wifi3(
  2397. (struct cdp_soc_t *)soc->dp_soc,
  2398. peer->mac_addr.raw, peer->vdev->vdev_id,
  2399. 0, tid, 0, win_sz + 1, 0xffff);
  2400. /*
2401. * If PEER_LOCK_REF_PROTECT is enabled, decrement the ref
2402. * which was incremented by dp_peer_get_ref_by_id
  2403. */
  2404. dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
  2405. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2406. QDF_TRACE_LEVEL_INFO,
  2407. FL("PeerID %d BAW %d TID %d stat %d"),
  2408. peer_id, win_sz, tid, status);
  2409. } else {
  2410. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2411. QDF_TRACE_LEVEL_ERROR,
  2412. FL("Peer not found peer id %d"),
  2413. peer_id);
  2414. }
  2415. break;
  2416. }
  2417. case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
  2418. {
  2419. dp_txrx_fw_stats_handler(soc->dp_soc, htt_t2h_msg);
  2420. break;
  2421. }
  2422. case HTT_T2H_MSG_TYPE_PEER_MAP_V2:
  2423. {
  2424. u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
  2425. u_int8_t *peer_mac_addr;
  2426. u_int16_t peer_id;
  2427. u_int16_t hw_peer_id;
  2428. u_int8_t vdev_id;
  2429. bool is_wds;
  2430. u_int16_t ast_hash;
  2431. struct dp_ast_flow_override_info ast_flow_info;
  2432. qdf_mem_set(&ast_flow_info, 0,
  2433. sizeof(struct dp_ast_flow_override_info));
  2434. peer_id = HTT_RX_PEER_MAP_V2_SW_PEER_ID_GET(*msg_word);
  2435. hw_peer_id =
  2436. HTT_RX_PEER_MAP_V2_HW_PEER_ID_GET(*(msg_word + 2));
  2437. vdev_id = HTT_RX_PEER_MAP_V2_VDEV_ID_GET(*msg_word);
  2438. peer_mac_addr =
  2439. htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
  2440. &mac_addr_deswizzle_buf[0]);
  2441. is_wds =
  2442. HTT_RX_PEER_MAP_V2_NEXT_HOP_GET(*(msg_word + 3));
  2443. ast_hash =
  2444. HTT_RX_PEER_MAP_V2_AST_HASH_VALUE_GET(*(msg_word + 3));
  2445. /*
  2446. * Update 4 ast_index per peer, ast valid mask
  2447. * and TID flow valid mask.
2448. * AST valid mask is a 3-bit field corresponding to
2449. * ast_index[3:1]; ast_index 0 is always valid.
  2450. */
  2451. ast_flow_info.ast_valid_mask =
  2452. HTT_RX_PEER_MAP_V2_AST_VALID_MASK_GET(*(msg_word + 3));
  2453. ast_flow_info.ast_idx[0] = hw_peer_id;
  2454. ast_flow_info.ast_flow_mask[0] =
  2455. HTT_RX_PEER_MAP_V2_AST_0_FLOW_MASK_GET(*(msg_word + 4));
  2456. ast_flow_info.ast_idx[1] =
  2457. HTT_RX_PEER_MAP_V2_AST_INDEX_1_GET(*(msg_word + 4));
  2458. ast_flow_info.ast_flow_mask[1] =
  2459. HTT_RX_PEER_MAP_V2_AST_1_FLOW_MASK_GET(*(msg_word + 4));
  2460. ast_flow_info.ast_idx[2] =
  2461. HTT_RX_PEER_MAP_V2_AST_INDEX_2_GET(*(msg_word + 5));
  2462. ast_flow_info.ast_flow_mask[2] =
  2463. HTT_RX_PEER_MAP_V2_AST_2_FLOW_MASK_GET(*(msg_word + 4));
  2464. ast_flow_info.ast_idx[3] =
  2465. HTT_RX_PEER_MAP_V2_AST_INDEX_3_GET(*(msg_word + 6));
  2466. ast_flow_info.ast_flow_mask[3] =
  2467. HTT_RX_PEER_MAP_V2_AST_3_FLOW_MASK_GET(*(msg_word + 4));
  2468. /*
  2469. * TID valid mask is applicable only
  2470. * for HI and LOW priority flows.
2471. * tid_valid_mask is an 8-bit field corresponding
2472. * to TID[7:0]
  2473. */
  2474. ast_flow_info.tid_valid_low_pri_mask =
  2475. HTT_RX_PEER_MAP_V2_TID_VALID_LOW_PRI_GET(*(msg_word + 5));
  2476. ast_flow_info.tid_valid_hi_pri_mask =
  2477. HTT_RX_PEER_MAP_V2_TID_VALID_HI_PRI_GET(*(msg_word + 5));
  2478. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2479. QDF_TRACE_LEVEL_INFO,
  2480. "HTT_T2H_MSG_TYPE_PEER_MAP msg for peer id %d vdev id %d n",
  2481. peer_id, vdev_id);
  2482. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2483. QDF_TRACE_LEVEL_INFO,
  2484. "ast_idx[0] %d ast_idx[1] %d ast_idx[2] %d ast_idx[3] %d n",
  2485. ast_flow_info.ast_idx[0],
  2486. ast_flow_info.ast_idx[1],
  2487. ast_flow_info.ast_idx[2],
  2488. ast_flow_info.ast_idx[3]);
  2489. dp_rx_peer_map_handler(soc->dp_soc, peer_id,
  2490. hw_peer_id, vdev_id,
  2491. peer_mac_addr, ast_hash,
  2492. is_wds);
  2493. /*
  2494. * Update ast indexes for flow override support
  2495. * Applicable only for non wds peers
  2496. */
  2497. if (!soc->dp_soc->ast_offload_support)
  2498. dp_peer_ast_index_flow_queue_map_create(
  2499. soc->dp_soc, is_wds,
  2500. peer_id, peer_mac_addr,
  2501. &ast_flow_info);
  2502. break;
  2503. }
  2504. case HTT_T2H_MSG_TYPE_PEER_UNMAP_V2:
  2505. {
  2506. u_int8_t mac_addr_deswizzle_buf[QDF_MAC_ADDR_SIZE];
  2507. u_int8_t *mac_addr;
  2508. u_int16_t peer_id;
  2509. u_int8_t vdev_id;
  2510. u_int8_t is_wds;
  2511. u_int32_t free_wds_count;
  2512. peer_id =
  2513. HTT_RX_PEER_UNMAP_V2_SW_PEER_ID_GET(*msg_word);
  2514. vdev_id = HTT_RX_PEER_UNMAP_V2_VDEV_ID_GET(*msg_word);
  2515. mac_addr =
  2516. htt_t2h_mac_addr_deswizzle((u_int8_t *)(msg_word + 1),
  2517. &mac_addr_deswizzle_buf[0]);
  2518. is_wds =
  2519. HTT_RX_PEER_UNMAP_V2_NEXT_HOP_GET(*(msg_word + 2));
  2520. free_wds_count =
  2521. HTT_RX_PEER_UNMAP_V2_PEER_WDS_FREE_COUNT_GET(*(msg_word + 4));
  2522. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2523. QDF_TRACE_LEVEL_INFO,
  2524. "HTT_T2H_MSG_TYPE_PEER_UNMAP msg for peer id %d vdev id %d n",
  2525. peer_id, vdev_id);
  2526. dp_rx_peer_unmap_handler(soc->dp_soc, peer_id,
  2527. vdev_id, mac_addr,
  2528. is_wds, free_wds_count);
  2529. break;
  2530. }
  2531. case HTT_T2H_MSG_TYPE_RX_DELBA:
  2532. {
  2533. uint16_t peer_id;
  2534. uint8_t tid;
  2535. uint8_t win_sz;
  2536. QDF_STATUS status;
  2537. peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
  2538. tid = HTT_RX_DELBA_TID_GET(*msg_word);
  2539. win_sz = HTT_RX_DELBA_WIN_SIZE_GET(*msg_word);
  2540. status = dp_rx_delba_ind_handler(
  2541. soc->dp_soc,
  2542. peer_id, tid, win_sz);
  2543. QDF_TRACE(QDF_MODULE_ID_TXRX,
  2544. QDF_TRACE_LEVEL_INFO,
  2545. FL("DELBA PeerID %d BAW %d TID %d stat %d"),
  2546. peer_id, win_sz, tid, status);
  2547. break;
  2548. }
  2549. case HTT_T2H_MSG_TYPE_FSE_CMEM_BASE_SEND:
  2550. {
  2551. uint16_t num_entries;
  2552. uint32_t cmem_ba_lo;
  2553. uint32_t cmem_ba_hi;
  2554. num_entries = HTT_CMEM_BASE_SEND_NUM_ENTRIES_GET(*msg_word);
  2555. cmem_ba_lo = *(msg_word + 1);
  2556. cmem_ba_hi = *(msg_word + 2);
  2557. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
  2558. FL("CMEM FSE num_entries %u CMEM BA LO %x HI %x"),
  2559. num_entries, cmem_ba_lo, cmem_ba_hi);
  2560. dp_rx_fst_update_cmem_params(soc->dp_soc, num_entries,
  2561. cmem_ba_lo, cmem_ba_hi);
  2562. break;
  2563. }
  2564. case HTT_T2H_MSG_TYPE_TX_OFFLOAD_DELIVER_IND:
  2565. {
  2566. dp_offload_ind_handler(soc, msg_word);
  2567. break;
  2568. }
  2569. default:
  2570. break;
  2571. };
  2572. /* Free the indication buffer */
  2573. if (free_buf)
  2574. qdf_nbuf_free(htt_t2h_msg);
  2575. }
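/*
 * Ownership sketch (not compiled, hypothetical message type): the rule the
 * switch above follows for the indication buffer. A case that hands
 * htt_t2h_msg off for deferred processing (as the PPDU stats case does)
 * must clear free_buf so the buffer is not released at the end of the
 * handler; every other case leaves free_buf set.
 *
 *	case HTT_T2H_MSG_TYPE_MY_NEW_IND:    // hypothetical
 *	{
 *		free_buf = dp_my_new_ind_handler(soc, msg_word, htt_t2h_msg);
 *		break;                        // handler owns the nbuf if it returned false
 *	}
 */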
  2576. /*
  2577. * dp_htt_h2t_full() - Send full handler (called from HTC)
  2578. * @context: Opaque context (HTT SOC handle)
  2579. * @pkt: HTC packet
  2580. *
  2581. * Return: enum htc_send_full_action
  2582. */
  2583. static enum htc_send_full_action
  2584. dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
  2585. {
  2586. return HTC_SEND_FULL_KEEP;
  2587. }
  2588. /*
  2589. * dp_htt_hif_t2h_hp_callback() - HIF callback for high priority T2H messages
  2590. * @context: Opaque context (HTT SOC handle)
  2591. * @nbuf: nbuf containing T2H message
  2592. * @pipe_id: HIF pipe ID
  2593. *
  2594. * Return: QDF_STATUS
  2595. *
  2596. * TODO: Temporary change to bypass HTC connection for this new HIF pipe, which
  2597. * will be used for packet log and other high-priority HTT messages. Proper
  2598. * HTC connection to be added later once required FW changes are available
  2599. */
  2600. static QDF_STATUS
  2601. dp_htt_hif_t2h_hp_callback (void *context, qdf_nbuf_t nbuf, uint8_t pipe_id)
  2602. {
  2603. QDF_STATUS rc = QDF_STATUS_SUCCESS;
  2604. HTC_PACKET htc_pkt;
  2605. qdf_assert_always(pipe_id == DP_HTT_T2H_HP_PIPE);
  2606. qdf_mem_zero(&htc_pkt, sizeof(htc_pkt));
  2607. htc_pkt.Status = QDF_STATUS_SUCCESS;
  2608. htc_pkt.pPktContext = (void *)nbuf;
  2609. dp_htt_t2h_msg_handler(context, &htc_pkt);
  2610. return rc;
  2611. }
/*
 * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
 * @htt_soc: HTT SOC handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
htt_htc_soc_attach(struct htt_soc *soc)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;
	struct dp_soc *dpsoc = soc->dp_soc;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	/* N/A, fill is done by HIF */
	connect.EpCallbacks.RecvRefillWaterMark = 1;

	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for the HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to the HTT data message service */
	connect.service_id = HTT_DATA_MSG_SVC;

	status = htc_connect_service(soc->htc_soc, &connect, &response);

	if (status != QDF_STATUS_SUCCESS)
		return status;

	soc->htc_endpoint = response.Endpoint;

	hif_save_htc_htt_config_endpoint(dpsoc->hif_handle, soc->htc_endpoint);

	htt_interface_logging_init(&soc->htt_logger_handle, soc->ctrl_psoc);
	dp_hif_update_pipe_callback(soc->dp_soc, (void *)soc,
				    dp_htt_hif_t2h_hp_callback,
				    DP_HTT_T2H_HP_PIPE);

	return QDF_STATUS_SUCCESS; /* success */
}

/*
 * htt_soc_initialize() - SOC level HTT initialization
 * @htt_soc: Opaque HTT SOC handle
 * @ctrl_psoc: Opaque ctrl SOC handle
 * @htc_soc: SOC level HTC handle
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @osdev: QDF device
 *
 * Return: HTT handle on success; NULL on failure
 */
void *
htt_soc_initialize(struct htt_soc *htt_soc,
		   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		   HTC_HANDLE htc_soc,
		   hal_soc_handle_t hal_soc_hdl, qdf_device_t osdev)
{
	struct htt_soc *soc = (struct htt_soc *)htt_soc;

	soc->osdev = osdev;
	soc->ctrl_psoc = ctrl_psoc;
	soc->htc_soc = htc_soc;
	soc->hal_soc = hal_soc_hdl;

	if (htt_htc_soc_attach(soc))
		goto fail2;

	return soc;

fail2:
	return NULL;
}

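/*
 * Illustrative bring-up sketch (editor's addition, not part of the driver):
 * shows the order in which a SOC attach path might call the HTT SOC APIs in
 * this file — pre-allocate HTC_PACKET objects first, then initialize HTT,
 * which connects HTT_DATA_MSG_SVC to HTC and registers the high-priority
 * pipe callback. The guard macro DP_HTT_DOC_EXAMPLES and the helper name are
 * hypothetical; real callers live elsewhere in the DP layer.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static void *dp_htt_doc_example_soc_bringup(struct htt_soc *htt_soc,
					    struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
					    HTC_HANDLE htc_soc,
					    hal_soc_handle_t hal_soc_hdl,
					    qdf_device_t osdev)
{
	/* Reserve HTC_PACKET objects before any H2T message is sent */
	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
		return NULL;

	/* Connect the HTT service and register the high-priority pipe */
	return htt_soc_initialize(htt_soc, ctrl_psoc, htc_soc,
				  hal_soc_hdl, osdev);
}
#endif /* DP_HTT_DOC_EXAMPLES */
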
/*
 * htt_soc_htc_dealloc() - Free HTC packet pools and HTT logging resources
 * @htt_handle: SOC level HTT handle
 */
void htt_soc_htc_dealloc(struct htt_soc *htt_handle)
{
	htt_interface_logging_deinit(htt_handle->htt_logger_handle);
	htt_htc_misc_pkt_pool_free(htt_handle);
	htt_htc_pkt_pool_free(htt_handle);
}

/*
 * htt_soc_htc_prealloc() - HTC memory prealloc
 * @htt_soc: SOC level HTT handle
 *
 * Return: QDF_STATUS_SUCCESS on Success or
 * QDF_STATUS_E_NOMEM on allocation failure
 */
QDF_STATUS htt_soc_htc_prealloc(struct htt_soc *soc)
{
	int i;

	soc->htt_htc_pkt_freelist = NULL;
	/* pre-allocate some HTC_PACKET objects */
	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
		struct dp_htt_htc_pkt_union *pkt;

		pkt = qdf_mem_malloc(sizeof(*pkt));
		if (!pkt)
			return QDF_STATUS_E_NOMEM;
		htt_htc_pkt_free(soc, &pkt->u.pkt);
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * htt_soc_detach() - Free SOC level HTT handle
 * @htt_hdl: HTT SOC handle
 */
void htt_soc_detach(struct htt_soc *htt_hdl)
{
	int i;
	struct htt_soc *htt_handle = (struct htt_soc *)htt_hdl;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		qdf_mem_free(htt_handle->pdevid_tt[i].umac_ttt);
		qdf_mem_free(htt_handle->pdevid_tt[i].lmac_ttt);
	}

	HTT_TX_MUTEX_DESTROY(&htt_handle->htt_tx_mutex);
	qdf_mem_free(htt_handle);
}

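/*
 * Illustrative teardown sketch (editor's addition, not part of the driver):
 * pairs the allocation APIs above with a release order — free the HTC packet
 * pools and logging first (htt_soc_htc_dealloc), then release the SOC handle
 * itself (htt_soc_detach). The guard macro and helper name are hypothetical.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static void dp_htt_doc_example_soc_teardown(struct htt_soc *htt_soc)
{
	/* Drop HTC/HTT logging and packet pool resources */
	htt_soc_htc_dealloc(htt_soc);
	/* Free per-pdev tables and the handle itself */
	htt_soc_detach(htt_soc);
}
#endif /* DP_HTT_DOC_EXAMPLES */
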
/**
 * dp_h2t_ext_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @config_param_0: extra configuration parameters
 * @config_param_1: extra configuration parameters
 * @config_param_2: extra configuration parameters
 * @config_param_3: extra configuration parameters
 * @cookie_val: cookie value (LSB) echoed back by FW in the stats response
 * @cookie_msb: cookie value (MSB); the low bits carry the pdev_id
 * @mac_id: mac number
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint32_t config_param_0,
		uint32_t config_param_1, uint32_t config_param_2,
		uint32_t config_param_3, int cookie_val, int cookie_msb,
		uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask = 0;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_EXT_STATS_REQ_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/* TODO: Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
						       mac_for_pdev);

	pdev_mask = 1 << target_pdev_id;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_EXT_STATS_REQ_MSG_SZ) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to expand head for HTT_EXT_STATS");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	dp_htt_tx_stats_info("%pK: cookie <-> %d\n config_param_0 %u\n"
			     "config_param_1 %u\n config_param_2 %u\n"
			     "config_param_3 %u\n -------------",
			     pdev->soc, cookie_val,
			     config_param_0,
			     config_param_1, config_param_2, config_param_3);

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_EXT_STATS_REQ);
	HTT_H2T_EXT_STATS_REQ_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_EXT_STATS_REQ_STATS_TYPE_SET(*msg_word, stats_type_upload_mask);

	/* word 1 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_0);

	/* word 2 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_1);

	/* word 3 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_2);

	/* word 4 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, config_param_3);

	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, 0);

	/* word 5 */
	msg_word++;

	/* word 6 */
	msg_word++;
	*msg_word = 0;
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_val);

	/* word 7 */
	msg_word++;
	*msg_word = 0;
	/* Currently using the last 2 bits for pdev_id.
	 * For future reference, 3 bits in cookie_msb are reserved for pdev_id.
	 */
	cookie_msb = (cookie_msb | pdev->pdev_id);
	HTT_H2T_EXT_STATS_REQ_CONFIG_PARAM_SET(*msg_word, cookie_msb);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for FW response msg not guaranteed */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
				     htt_logger_bufp);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

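/*
 * Illustrative request sketch (editor's addition, not part of the driver):
 * builds a minimal ext-stats request for the calling pdev with no extra
 * config params and a zero cookie. The stats mask is left to the caller;
 * real callers derive it from the cdp stats enums. The guard macro and
 * helper name are hypothetical.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS
dp_htt_doc_example_ext_stats_req(struct dp_pdev *pdev,
				 uint32_t stats_type_upload_mask)
{
	return dp_h2t_ext_stats_msg_send(pdev, stats_type_upload_mask,
					 0, 0, 0, 0, /* config_param_0..3 */
					 0, 0,       /* cookie_val, cookie_msb */
					 0 /* mac_id */);
}
#endif /* DP_HTT_DOC_EXAMPLES */
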
/**
 * dp_h2t_3tuple_config_send(): function to construct the 3-tuple configuration
 * HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @tuple_mask: tuple configuration to report 3 tuple hash value in either
 * toeplitz_2_or_4 or flow_id_toeplitz in MSDU START TLV.
 * @mac_id: mac number
 *
 * tuple_mask[1:0]:
 * 00 - Do not report 3 tuple hash value
 * 10 - Report 3 tuple hash value in toeplitz_2_or_4
 * 01 - Report 3 tuple hash value in flow_id_toeplitz
 * 11 - Report 3 tuple hash value in both toeplitz_2_or_4 & flow_id_toeplitz
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_3tuple_config_send(struct dp_pdev *pdev,
				     uint32_t tuple_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t *htt_logger_bufp;
	int mac_for_pdev;
	int target_pdev_id;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_3_TUPLE_HASH_CFG_REQ_BYTES),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
	target_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
						       mac_for_pdev);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg, HTT_3_TUPLE_HASH_CFG_REQ_BYTES)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to expand head for HTT_3TUPLE_CONFIG");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	dp_htt_info("%pK: config_param_sent 0x%x for target_pdev %d\n -------------",
		    pdev->soc, tuple_mask, target_pdev_id);

	msg_word = (uint32_t *)qdf_nbuf_data(msg);
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG);
	HTT_RX_3_TUPLE_HASH_PDEV_ID_SET(*msg_word, target_pdev_id);

	msg_word++;
	*msg_word = 0;
	HTT_H2T_FLOW_ID_TOEPLITZ_FIELD_CONFIG_SET(*msg_word, tuple_mask);
	HTT_H2T_TOEPLITZ_2_OR_4_FIELD_CONFIG_SET(*msg_word, tuple_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
			&pkt->htc_pkt,
			dp_htt_h2t_send_complete_free_netbuf,
			qdf_nbuf_data(msg),
			qdf_nbuf_len(msg),
			soc->htc_endpoint,
			/* tag for no FW response msg */
			HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_3_TUPLE_HASH_CFG,
			    htt_logger_bufp);

	return QDF_STATUS_SUCCESS;
}

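/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * tuple_mask values documented above feed both field-config setters, so a
 * caller only picks the 2-bit mask. Here 0x3 requests the 3-tuple hash in
 * both toeplitz_2_or_4 and flow_id_toeplitz. The guard macro and helper
 * name are hypothetical.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS dp_htt_doc_example_3tuple_both(struct dp_pdev *pdev,
						 uint8_t mac_id)
{
	/* tuple_mask = 0b11: report in both MSDU START fields */
	return dp_h2t_3tuple_config_send(pdev, 0x3, mac_id);
}
#endif /* DP_HTT_DOC_EXAMPLES */
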
/* This workaround can be removed once a proper definition for
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG is available in the HTT header (htt.h).
 */
#if defined(WDI_EVENT_ENABLE)
/**
 * dp_h2t_cfg_stats_msg_send(): function to construct HTT message to pass to FW
 * @pdev: DP PDEV handle
 * @stats_type_upload_mask: stats type requested by user
 * @mac_id: Mac id number
 *
 * return: QDF STATUS
 */
QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
		uint32_t stats_type_upload_mask, uint8_t mac_id)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	uint32_t *msg_word;
	uint8_t pdev_mask;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
			soc->osdev,
			HTT_MSG_BUF_SIZE(HTT_H2T_PPDU_STATS_CFG_MSG_SZ),
			HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);

	if (!msg) {
		dp_htt_err("%pK: Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer",
			   pdev->soc);
		qdf_assert(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* TODO: Add support for SOC stats
	 * Bit 0: SOC Stats
	 * Bit 1: Pdev stats for pdev id 0
	 * Bit 2: Pdev stats for pdev id 1
	 * Bit 3: Pdev stats for pdev id 2
	 */
	pdev_mask = 1 << dp_get_target_pdev_id_for_host_pdev_id(pdev->soc,
								mac_id);

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
		dp_htt_err("%pK: Failed to expand head for HTT_CFG_STATS",
			   pdev->soc);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	msg_word = (uint32_t *)qdf_nbuf_data(msg);

	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
	HTT_H2T_PPDU_STATS_CFG_PDEV_MASK_SET(*msg_word, pdev_mask);
	HTT_H2T_PPDU_STATS_CFG_TLV_BITMASK_SET(*msg_word,
					       stats_type_upload_mask);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		dp_htt_err("%pK: Fail to allocate dp_htt_htc_pkt buffer",
			   pdev->soc);
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_NOMEM;
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg), qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
				     (uint8_t *)msg_word);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}
qdf_export_symbol(dp_h2t_cfg_stats_msg_send);
#endif

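/*
 * Illustrative sketch (editor's addition, not part of the driver): requests
 * PPDU stats TLVs for one mac. The TLV bitmask value is left to the caller;
 * real callers build it from the PPDU stats TLV bitmap they care about.
 * The DP_HTT_DOC_EXAMPLES guard and helper name are hypothetical; the
 * WDI_EVENT_ENABLE guard mirrors the function above.
 */
#if defined(WDI_EVENT_ENABLE) && defined(DP_HTT_DOC_EXAMPLES)
static QDF_STATUS dp_htt_doc_example_ppdu_stats_cfg(struct dp_pdev *pdev,
						    uint8_t mac_id,
						    uint32_t tlv_bitmask)
{
	return dp_h2t_cfg_stats_msg_send(pdev, tlv_bitmask, mac_id);
}
#endif
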
/*
 * dp_peer_update_inactive_time() - Update a peer's inactive time from the
 * peer stats TLVs received from FW
 * @pdev: DP pdev handle
 * @tag_type: HTT stats TLV tag type
 * @tag_buf: pointer to the TLV buffer
 */
void
dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
			     uint32_t *tag_buf)
{
	struct dp_peer *peer = NULL;

	switch (tag_type) {
	case HTT_STATS_PEER_DETAILS_TAG:
	{
		htt_peer_details_tlv *dp_stats_buf =
			(htt_peer_details_tlv *)tag_buf;

		pdev->fw_stats_peer_id = dp_stats_buf->sw_peer_id;
	}
	break;
	case HTT_STATS_PEER_STATS_CMN_TAG:
	{
		htt_peer_stats_cmn_tlv *dp_stats_buf =
			(htt_peer_stats_cmn_tlv *)tag_buf;

		peer = dp_peer_get_ref_by_id(pdev->soc, pdev->fw_stats_peer_id,
					     DP_MOD_ID_HTT);

		if (peer && !peer->bss_peer) {
			peer->stats.tx.inactive_time =
				dp_stats_buf->inactive_time;
			qdf_event_set(&pdev->fw_peer_stats_event);
		}
		if (peer)
			dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
	}
	break;
	default:
		qdf_err("Invalid tag_type");
	}
}

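/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * handler above fills peer->stats.tx.inactive_time and signals
 * pdev->fw_peer_stats_event; a requester would typically send the peer stats
 * request and then block on that event with a timeout. The timeout value,
 * guard macro, and helper name are assumptions.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS dp_htt_doc_example_wait_peer_stats(struct dp_pdev *pdev)
{
	/* 1000 ms is an arbitrary example timeout */
	return qdf_wait_single_event(&pdev->fw_peer_stats_event, 1000);
}
#endif /* DP_HTT_DOC_EXAMPLES */
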
/**
 * dp_htt_rx_flow_fst_setup(): Send HTT Rx FST setup message to FW
 * @pdev: DP pdev handle
 * @fse_setup_info: FST setup parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fst_setup(struct dp_pdev *pdev,
			 struct dp_htt_rx_flow_fst_setup *fse_setup_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_setup_t *fse_setup;
	uint8_t *htt_logger_bufp;
	u_int32_t *key;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_setup_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);

	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_setup_t))) {
		qdf_err("Failed to expand head for HTT RX_FSE_SETUP msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_setup_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG);

	fse_setup = (struct htt_h2t_msg_rx_fse_setup_t *)msg_word;

	HTT_RX_FSE_SETUP_PDEV_ID_SET(*msg_word, fse_setup_info->pdev_id);

	msg_word++;
	HTT_RX_FSE_SETUP_NUM_REC_SET(*msg_word, fse_setup_info->max_entries);
	HTT_RX_FSE_SETUP_MAX_SEARCH_SET(*msg_word, fse_setup_info->max_search);
	HTT_RX_FSE_SETUP_IP_DA_SA_PREFIX_SET(*msg_word,
					     fse_setup_info->ip_da_sa_prefix);

	msg_word++;
	HTT_RX_FSE_SETUP_BASE_ADDR_LO_SET(*msg_word,
					  fse_setup_info->base_addr_lo);
	msg_word++;
	HTT_RX_FSE_SETUP_BASE_ADDR_HI_SET(*msg_word,
					  fse_setup_info->base_addr_hi);

	key = (u_int32_t *)fse_setup_info->hash_key;
	fse_setup->toeplitz31_0 = *key++;
	fse_setup->toeplitz63_32 = *key++;
	fse_setup->toeplitz95_64 = *key++;
	fse_setup->toeplitz127_96 = *key++;
	fse_setup->toeplitz159_128 = *key++;
	fse_setup->toeplitz191_160 = *key++;
	fse_setup->toeplitz223_192 = *key++;
	fse_setup->toeplitz255_224 = *key++;
	fse_setup->toeplitz287_256 = *key++;
	fse_setup->toeplitz314_288 = *key;

	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz31_0);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz63_32);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz95_64);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz127_96);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz159_128);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz191_160);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz223_192);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz255_224);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_VALUE_SET(*msg_word, fse_setup->toeplitz287_256);
	msg_word++;
	HTT_RX_FSE_SETUP_HASH_314_288_SET(*msg_word,
					  fse_setup->toeplitz314_288);

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_SETUP_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_SETUP sent to FW for pdev = %u",
			fse_setup_info->pdev_id);
		QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_DEBUG,
				   (void *)fse_setup_info->hash_key,
				   fse_setup_info->hash_key_len);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

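/*
 * Illustrative sketch (editor's addition, not part of the driver): shows the
 * shape of the setup parameters consumed above. The table size, search depth,
 * prefix value, guard macro, and helper name are all placeholders/assumptions;
 * the real values come from the Rx FST attach path.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS dp_htt_doc_example_fst_setup(struct dp_pdev *pdev,
					       qdf_dma_addr_t fst_base,
					       uint8_t *toeplitz_key,
					       uint32_t key_len)
{
	struct dp_htt_rx_flow_fst_setup info = {0};

	info.pdev_id = pdev->pdev_id;
	info.max_entries = 1024;		/* example table size */
	info.max_search = 8;			/* example search depth */
	info.base_addr_lo = (uint32_t)(fst_base & 0xffffffff);
	info.base_addr_hi = (uint32_t)((uint64_t)fst_base >> 32);
	info.ip_da_sa_prefix = 0;		/* example prefix setting */
	info.hash_key = toeplitz_key;		/* 320-bit Toeplitz key */
	info.hash_key_len = key_len;

	return dp_htt_rx_flow_fst_setup(pdev, &info);
}
#endif /* DP_HTT_DOC_EXAMPLES */
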
/**
 * dp_htt_rx_flow_fse_operation(): Send HTT Flow Search Entry msg to
 * add/del a flow in HW
 * @pdev: DP pdev handle
 * @fse_op_info: Flow entry parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_flow_fse_operation(struct dp_pdev *pdev,
			     struct dp_htt_rx_flow_fst_operation *fse_op_info)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_rx_fse_operation_t *fse_operation;
	uint8_t *htt_logger_bufp;
	QDF_STATUS status;

	msg = qdf_nbuf_alloc(
		soc->osdev,
		HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_rx_fse_operation_t)),
		/* reserve room for the HTC header */
		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_rx_fse_operation_t))) {
		qdf_err("Failed to expand head for HTT_RX_FSE_OPERATION msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_rx_fse_operation_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG);

	fse_operation = (struct htt_h2t_msg_rx_fse_operation_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, fse_op_info->pdev_id);

	msg_word++;
	HTT_RX_FSE_IPSEC_VALID_SET(*msg_word, false);
	if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_ENTRY) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_ENTRY);
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.src_ip_127_96));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_31_0));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_63_32));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(fse_op_info->rx_flow->flow_tuple_info.dest_ip_95_64));
		msg_word++;
		HTT_RX_FSE_OPERATION_IP_ADDR_SET(
		*msg_word,
		qdf_htonl(
		fse_op_info->rx_flow->flow_tuple_info.dest_ip_127_96));
		msg_word++;
		HTT_RX_FSE_SOURCEPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.src_port);
		HTT_RX_FSE_DESTPORT_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.dest_port);
		msg_word++;
		HTT_RX_FSE_L4_PROTO_SET(
			*msg_word,
			fse_op_info->rx_flow->flow_tuple_info.l4_protocol);
	} else if (fse_op_info->op_code == DP_HTT_FST_CACHE_INVALIDATE_FULL) {
		HTT_RX_FSE_OPERATION_SET(*msg_word,
					 HTT_RX_FSE_CACHE_INVALIDATE_FULL);
	} else if (fse_op_info->op_code == DP_HTT_FST_DISABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_DISABLE);
	} else if (fse_op_info->op_code == DP_HTT_FST_ENABLE) {
		HTT_RX_FSE_OPERATION_SET(*msg_word, HTT_RX_FSE_ENABLE);
	}

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(
		&pkt->htc_pkt,
		dp_htt_h2t_send_complete_free_netbuf,
		qdf_nbuf_data(msg),
		qdf_nbuf_len(msg),
		soc->htc_endpoint,
		/* tag for no FW response msg */
		HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt,
				     HTT_H2T_MSG_TYPE_RX_FSE_OPERATION_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T RX_FSE_OPERATION_CFG sent to FW for pdev = %u",
			fse_op_info->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

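/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * simplest operation the function above supports is a full cache invalidate,
 * which needs no flow tuple. The guard macro and helper name are
 * hypothetical.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS dp_htt_doc_example_fse_cache_flush(struct dp_pdev *pdev)
{
	struct dp_htt_rx_flow_fst_operation op = {0};

	op.pdev_id = pdev->pdev_id;
	op.op_code = DP_HTT_FST_CACHE_INVALIDATE_FULL;
	/* op.rx_flow is not dereferenced for a full invalidate */

	return dp_htt_rx_flow_fse_operation(pdev, &op);
}
#endif /* DP_HTT_DOC_EXAMPLES */
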
/**
 * dp_htt_rx_fisa_config(): Send HTT msg to configure FISA
 * @pdev: DP pdev handle
 * @fisa_config: FISA configuration parameters
 *
 * Return: Success when HTT message is sent, error on failure
 */
QDF_STATUS
dp_htt_rx_fisa_config(struct dp_pdev *pdev,
		      struct dp_htt_rx_fisa_cfg *fisa_config)
{
	struct htt_soc *soc = pdev->soc->htt_handle;
	struct dp_htt_htc_pkt *pkt;
	qdf_nbuf_t msg;
	u_int32_t *msg_word;
	struct htt_h2t_msg_type_fisa_config_t *htt_fisa_config;
	uint8_t *htt_logger_bufp;
	uint32_t len;
	QDF_STATUS status;

	len = HTT_MSG_BUF_SIZE(sizeof(struct htt_h2t_msg_type_fisa_config_t));

	msg = qdf_nbuf_alloc(soc->osdev,
			     len,
			     /* reserve room for the HTC header */
			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING,
			     4,
			     TRUE);
	if (!msg)
		return QDF_STATUS_E_NOMEM;

	/*
	 * Set the length of the message.
	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
	 * separately during the below call to qdf_nbuf_push_head.
	 * The contribution from the HTC header is added separately inside HTC.
	 */
	if (!qdf_nbuf_put_tail(msg,
			       sizeof(struct htt_h2t_msg_type_fisa_config_t))) {
		qdf_err("Failed to expand head for HTT_RX_FISA_CFG msg");
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_FAILURE;
	}

	/* fill in the message contents */
	msg_word = (u_int32_t *)qdf_nbuf_data(msg);

	memset(msg_word, 0, sizeof(struct htt_h2t_msg_type_fisa_config_t));
	/* rewind beyond alignment pad to get to the HTC header reserved area */
	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
	htt_logger_bufp = (uint8_t *)msg_word;

	*msg_word = 0;
	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_FISA_CFG);

	htt_fisa_config = (struct htt_h2t_msg_type_fisa_config_t *)msg_word;

	HTT_RX_FSE_OPERATION_PDEV_ID_SET(*msg_word, htt_fisa_config->pdev_id);

	msg_word++;
	HTT_RX_FISA_CONFIG_FISA_V2_ENABLE_SET(*msg_word, 1);
	HTT_RX_FISA_CONFIG_FISA_V2_AGGR_LIMIT_SET(*msg_word, 0xf);

	msg_word++;
	htt_fisa_config->fisa_timeout_threshold = fisa_config->fisa_timeout;

	pkt = htt_htc_pkt_alloc(soc);
	if (!pkt) {
		qdf_err("Fail to allocate dp_htt_htc_pkt buffer");
		qdf_assert(0);
		qdf_nbuf_free(msg);
		return QDF_STATUS_E_RESOURCES; /* failure */
	}

	pkt->soc_ctxt = NULL; /* not used during send-done callback */

	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
			       dp_htt_h2t_send_complete_free_netbuf,
			       qdf_nbuf_data(msg),
			       qdf_nbuf_len(msg),
			       soc->htc_endpoint,
			       /* tag for no FW response msg */
			       HTC_TX_PACKET_TAG_RUNTIME_PUT);

	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

	status = DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_RX_FISA_CFG,
				     htt_logger_bufp);

	if (status == QDF_STATUS_SUCCESS) {
		dp_info("HTT_H2T_MSG_TYPE_RX_FISA_CFG sent to FW for pdev = %u",
			fisa_config->pdev_id);
	} else {
		qdf_nbuf_free(msg);
		htt_htc_pkt_free(soc, pkt);
	}

	return status;
}

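/*
 * Illustrative sketch (editor's addition, not part of the driver): the only
 * caller-tunable field consumed above is the aggregation timeout, so the
 * configuration struct is small. The timeout value, guard macro, and helper
 * name are placeholders.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS dp_htt_doc_example_fisa_cfg(struct dp_pdev *pdev)
{
	struct dp_htt_rx_fisa_cfg cfg = {0};

	cfg.pdev_id = pdev->pdev_id;
	cfg.fisa_timeout = 0xff;	/* example timeout threshold */

	return dp_htt_rx_fisa_config(pdev, &cfg);
}
#endif /* DP_HTT_DOC_EXAMPLES */
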
/**
 * dp_bk_pressure_stats_handler(): worker function to print back pressure
 * stats
 * @context: argument to work function
 */
static void dp_bk_pressure_stats_handler(void *context)
{
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct dp_soc_srngs_state *soc_srngs_state = NULL;
	const char *ring_name;
	int i;
	struct dp_srng_ring_state *ring_state;
	bool empty_flag;

	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);

	/* Extract only first entry for printing in one work event */
	if (pdev->bkp_stats.queue_depth &&
	    !TAILQ_EMPTY(&pdev->bkp_stats.list)) {
		soc_srngs_state = TAILQ_FIRST(&pdev->bkp_stats.list);
		TAILQ_REMOVE(&pdev->bkp_stats.list, soc_srngs_state,
			     list_elem);
		pdev->bkp_stats.queue_depth--;
	}

	empty_flag = TAILQ_EMPTY(&pdev->bkp_stats.list);
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);

	if (soc_srngs_state) {
		DP_PRINT_STATS("### BKP stats for seq_num %u START ###",
			       soc_srngs_state->seq_num);
		for (i = 0; i < soc_srngs_state->max_ring_id; i++) {
			ring_state = &soc_srngs_state->ring_state[i];
			ring_name = dp_srng_get_str_from_hal_ring_type
						(ring_state->ring_type);
			DP_PRINT_STATS("%s: SW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->sw_head,
				       ring_state->sw_tail);

			DP_PRINT_STATS("%s: HW:Head pointer = %d Tail Pointer = %d\n",
				       ring_name,
				       ring_state->hw_head,
				       ring_state->hw_tail);
		}

		DP_PRINT_STATS("### BKP stats for seq_num %u COMPLETE ###",
			       soc_srngs_state->seq_num);
		qdf_mem_free(soc_srngs_state);
	}
	dp_print_napi_stats(pdev->soc);

	/* Schedule work again if queue is not empty */
	if (!empty_flag)
		qdf_queue_work(0, pdev->bkp_stats.work_queue,
			       &pdev->bkp_stats.work);
}

/*
 * dp_pdev_bkp_stats_detach() - detach resources for back pressure stats
 * processing
 * @pdev: Datapath PDEV handle
 */
void dp_pdev_bkp_stats_detach(struct dp_pdev *pdev)
{
	struct dp_soc_srngs_state *ring_state, *ring_state_next;

	if (!pdev->bkp_stats.work_queue)
		return;

	qdf_flush_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_destroy_workqueue(0, pdev->bkp_stats.work_queue);
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);

	qdf_spin_lock_bh(&pdev->bkp_stats.list_lock);
	TAILQ_FOREACH_SAFE(ring_state, &pdev->bkp_stats.list,
			   list_elem, ring_state_next) {
		TAILQ_REMOVE(&pdev->bkp_stats.list, ring_state,
			     list_elem);
		qdf_mem_free(ring_state);
	}
	qdf_spin_unlock_bh(&pdev->bkp_stats.list_lock);
	qdf_spinlock_destroy(&pdev->bkp_stats.list_lock);
}

/*
 * dp_pdev_bkp_stats_attach() - attach resources for back pressure stats
 * processing
 * @pdev: Datapath PDEV handle
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *         QDF_STATUS_E_NOMEM: Error
 */
QDF_STATUS dp_pdev_bkp_stats_attach(struct dp_pdev *pdev)
{
	TAILQ_INIT(&pdev->bkp_stats.list);
	pdev->bkp_stats.seq_num = 0;
	pdev->bkp_stats.queue_depth = 0;

	qdf_create_work(0, &pdev->bkp_stats.work,
			dp_bk_pressure_stats_handler, pdev);

	pdev->bkp_stats.work_queue =
		qdf_alloc_unbound_workqueue("dp_bkp_work_queue");
	if (!pdev->bkp_stats.work_queue)
		goto fail;

	qdf_spinlock_create(&pdev->bkp_stats.list_lock);
	return QDF_STATUS_SUCCESS;

fail:
	dp_htt_alert("BKP stats attach failed");
	qdf_flush_work(&pdev->bkp_stats.work);
	qdf_disable_work(&pdev->bkp_stats.work);
	return QDF_STATUS_E_FAILURE;
}
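
/*
 * Illustrative lifecycle sketch (editor's addition, not part of the driver):
 * the back-pressure stats machinery above is attached once per pdev, fed by
 * the T2H back-pressure indication which queues work items, and torn down on
 * pdev detach. The guard macro and helper name are hypothetical.
 */
#ifdef DP_HTT_DOC_EXAMPLES
static QDF_STATUS dp_htt_doc_example_bkp_stats_lifecycle(struct dp_pdev *pdev)
{
	QDF_STATUS status;

	status = dp_pdev_bkp_stats_attach(pdev);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... back-pressure events queue work onto pdev->bkp_stats ... */

	dp_pdev_bkp_stats_detach(pdev);
	return QDF_STATUS_SUCCESS;
}
#endif /* DP_HTT_DOC_EXAMPLES */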