wmi_unified.c 108 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763777377837793780378137823783378437853786
  1. /*
  2. * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. /*
  19. * Host WMI unified implementation
  20. */
  21. #include "htc_api.h"
  22. #include "htc_api.h"
  23. #include "wmi_unified_priv.h"
  24. #include "wmi_unified_api.h"
  25. #include "qdf_module.h"
  26. #include "qdf_platform.h"
  27. #ifdef WMI_EXT_DBG
  28. #include "qdf_list.h"
  29. #include "qdf_atomic.h"
  30. #endif
  31. #ifndef WMI_NON_TLV_SUPPORT
  32. #include "wmi_tlv_helper.h"
  33. #endif
  34. #include <linux/debugfs.h>
  35. #include <target_if.h>
  36. #include <qdf_debugfs.h>
  37. #include "wmi_filtered_logging.h"
  38. #include <wmi_hang_event.h>
  39. /* This check for CONFIG_WIN temporary added due to redeclaration compilation
  40. error in MCL. Error is caused due to inclusion of wmi.h in wmi_unified_api.h
  41. which gets included here through ol_if_athvar.h. Eventually it is expected that
  42. wmi.h will be removed from wmi_unified_api.h after cleanup, which will need
  43. WMI_CMD_HDR to be defined here. */
  44. /* Copied from wmi.h */
  45. #undef MS
  46. #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
  47. #undef SM
  48. #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
  49. #undef WO
  50. #define WO(_f) ((_f##_OFFSET) >> 2)
  51. #undef GET_FIELD
  52. #define GET_FIELD(_addr, _f) MS(*((uint32_t *)(_addr) + WO(_f)), _f)
  53. #undef SET_FIELD
  54. #define SET_FIELD(_addr, _f, _val) \
  55. (*((uint32_t *)(_addr) + WO(_f)) = \
  56. (*((uint32_t *)(_addr) + WO(_f)) & ~_f##_MASK) | SM(_val, _f))
  57. #define WMI_GET_FIELD(_msg_buf, _msg_type, _f) \
  58. GET_FIELD(_msg_buf, _msg_type ## _ ## _f)
  59. #define WMI_SET_FIELD(_msg_buf, _msg_type, _f, _val) \
  60. SET_FIELD(_msg_buf, _msg_type ## _ ## _f, _val)
  61. #define WMI_EP_APASS 0x0
  62. #define WMI_EP_LPASS 0x1
  63. #define WMI_EP_SENSOR 0x2
  64. #define WMI_INFOS_DBG_FILE_PERM (QDF_FILE_USR_READ | \
  65. QDF_FILE_USR_WRITE | \
  66. QDF_FILE_GRP_READ | \
  67. QDF_FILE_OTH_READ)
  68. /*
  69. * * Control Path
  70. * */
  71. typedef PREPACK struct {
  72. uint32_t commandId:24,
  73. reserved:2, /* used for WMI endpoint ID */
  74. plt_priv:6; /* platform private */
  75. } POSTPACK WMI_CMD_HDR; /* used for commands and events */
  76. #define WMI_CMD_HDR_COMMANDID_LSB 0
  77. #define WMI_CMD_HDR_COMMANDID_MASK 0x00ffffff
  78. #define WMI_CMD_HDR_COMMANDID_OFFSET 0x00000000
  79. #define WMI_CMD_HDR_WMI_ENDPOINTID_MASK 0x03000000
  80. #define WMI_CMD_HDR_WMI_ENDPOINTID_OFFSET 24
  81. #define WMI_CMD_HDR_PLT_PRIV_LSB 24
  82. #define WMI_CMD_HDR_PLT_PRIV_MASK 0xff000000
  83. #define WMI_CMD_HDR_PLT_PRIV_OFFSET 0x00000000
  84. /* end of copy wmi.h */
  85. #define WMI_MIN_HEAD_ROOM 64
  86. /* WBUFF pool sizes for WMI */
  87. /* Allocation of size 256 bytes */
  88. #define WMI_WBUFF_POOL_0_SIZE 128
  89. /* Allocation of size 512 bytes */
  90. #define WMI_WBUFF_POOL_1_SIZE 16
  91. /* Allocation of size 1024 bytes */
  92. #define WMI_WBUFF_POOL_2_SIZE 8
  93. /* Allocation of size 2048 bytes */
  94. #define WMI_WBUFF_POOL_3_SIZE 8
  95. #define RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT 500
  96. #ifdef WMI_INTERFACE_EVENT_LOGGING
  97. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
  98. /* TODO Cleanup this backported function */
  99. static int wmi_bp_seq_printf(qdf_debugfs_file_t m, const char *f, ...)
  100. {
  101. va_list args;
  102. va_start(args, f);
  103. seq_vprintf(m, f, args);
  104. va_end(args);
  105. return 0;
  106. }
  107. #else
  108. #define wmi_bp_seq_printf(m, fmt, ...) seq_printf((m), fmt, ##__VA_ARGS__)
  109. #endif
  110. #ifndef MAX_WMI_INSTANCES
  111. #define CUSTOM_MGMT_CMD_DATA_SIZE 4
  112. #endif
  113. #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
  114. /* WMI commands */
  115. uint32_t g_wmi_command_buf_idx = 0;
  116. struct wmi_command_debug wmi_command_log_buffer[WMI_CMD_DEBUG_MAX_ENTRY];
  117. /* WMI commands TX completed */
  118. uint32_t g_wmi_command_tx_cmp_buf_idx = 0;
  119. struct wmi_command_cmp_debug
  120. wmi_command_tx_cmp_log_buffer[WMI_CMD_CMPL_DEBUG_MAX_ENTRY];
  121. /* WMI events when processed */
  122. uint32_t g_wmi_event_buf_idx = 0;
  123. struct wmi_event_debug wmi_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
  124. /* WMI events when queued */
  125. uint32_t g_wmi_rx_event_buf_idx = 0;
  126. struct wmi_event_debug wmi_rx_event_log_buffer[WMI_EVENT_DEBUG_MAX_ENTRY];
  127. #endif
  128. static void wmi_minidump_detach(struct wmi_unified *wmi_handle)
  129. {
  130. struct wmi_log_buf_t *info =
  131. &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
  132. uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
  133. qdf_minidump_remove(info->buf, buf_size, "wmi_tx_cmp");
  134. }
  135. static void wmi_minidump_attach(struct wmi_unified *wmi_handle)
  136. {
  137. struct wmi_log_buf_t *info =
  138. &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
  139. uint32_t buf_size = info->size * sizeof(struct wmi_command_cmp_debug);
  140. qdf_minidump_log(info->buf, buf_size, "wmi_tx_cmp");
  141. }
  142. #define WMI_COMMAND_RECORD(h, a, b) { \
  143. if (wmi_cmd_log_max_entry <= \
  144. *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)) \
  145. *(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx) = 0;\
  146. ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
  147. [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)]\
  148. .command = a; \
  149. qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \
  150. wmi_command_log_buf_info.buf) \
  151. [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].data,\
  152. b, wmi_record_max_length); \
  153. ((struct wmi_command_debug *)h->log_info.wmi_command_log_buf_info.buf)\
  154. [*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx)].\
  155. time = qdf_get_log_timestamp(); \
  156. (*(h->log_info.wmi_command_log_buf_info.p_buf_tail_idx))++; \
  157. h->log_info.wmi_command_log_buf_info.length++; \
  158. }
  159. #define WMI_COMMAND_TX_CMP_RECORD(h, a, b, da, pa) { \
  160. if (wmi_cmd_cmpl_log_max_entry <= \
  161. *(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))\
  162. *(h->log_info.wmi_command_tx_cmp_log_buf_info. \
  163. p_buf_tail_idx) = 0; \
  164. ((struct wmi_command_cmp_debug *)h->log_info. \
  165. wmi_command_tx_cmp_log_buf_info.buf) \
  166. [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \
  167. p_buf_tail_idx)]. \
  168. command = a; \
  169. qdf_mem_copy(((struct wmi_command_cmp_debug *)h->log_info. \
  170. wmi_command_tx_cmp_log_buf_info.buf) \
  171. [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \
  172. p_buf_tail_idx)]. \
  173. data, b, wmi_record_max_length); \
  174. ((struct wmi_command_cmp_debug *)h->log_info. \
  175. wmi_command_tx_cmp_log_buf_info.buf) \
  176. [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \
  177. p_buf_tail_idx)]. \
  178. time = qdf_get_log_timestamp(); \
  179. ((struct wmi_command_cmp_debug *)h->log_info. \
  180. wmi_command_tx_cmp_log_buf_info.buf) \
  181. [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \
  182. p_buf_tail_idx)]. \
  183. dma_addr = da; \
  184. ((struct wmi_command_cmp_debug *)h->log_info. \
  185. wmi_command_tx_cmp_log_buf_info.buf) \
  186. [*(h->log_info.wmi_command_tx_cmp_log_buf_info. \
  187. p_buf_tail_idx)]. \
  188. phy_addr = pa; \
  189. (*(h->log_info.wmi_command_tx_cmp_log_buf_info.p_buf_tail_idx))++;\
  190. h->log_info.wmi_command_tx_cmp_log_buf_info.length++; \
  191. }
  192. #define WMI_EVENT_RECORD(h, a, b) { \
  193. if (wmi_event_log_max_entry <= \
  194. *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)) \
  195. *(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx) = 0;\
  196. ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
  197. [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)]. \
  198. event = a; \
  199. qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \
  200. wmi_event_log_buf_info.buf) \
  201. [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].data, b,\
  202. wmi_record_max_length); \
  203. ((struct wmi_event_debug *)h->log_info.wmi_event_log_buf_info.buf)\
  204. [*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx)].time =\
  205. qdf_get_log_timestamp(); \
  206. (*(h->log_info.wmi_event_log_buf_info.p_buf_tail_idx))++; \
  207. h->log_info.wmi_event_log_buf_info.length++; \
  208. }
  209. #define WMI_RX_EVENT_RECORD(h, a, b) { \
  210. if (wmi_event_log_max_entry <= \
  211. *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))\
  212. *(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx) = 0;\
  213. ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
  214. [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
  215. event = a; \
  216. qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \
  217. wmi_rx_event_log_buf_info.buf) \
  218. [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
  219. data, b, wmi_record_max_length); \
  220. ((struct wmi_event_debug *)h->log_info.wmi_rx_event_log_buf_info.buf)\
  221. [*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx)].\
  222. time = qdf_get_log_timestamp(); \
  223. (*(h->log_info.wmi_rx_event_log_buf_info.p_buf_tail_idx))++; \
  224. h->log_info.wmi_rx_event_log_buf_info.length++; \
  225. }
  226. #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
  227. uint32_t g_wmi_mgmt_command_buf_idx = 0;
  228. struct
  229. wmi_command_debug wmi_mgmt_command_log_buffer[WMI_MGMT_TX_DEBUG_MAX_ENTRY];
  230. /* wmi_mgmt commands TX completed */
  231. uint32_t g_wmi_mgmt_command_tx_cmp_buf_idx = 0;
  232. struct wmi_command_debug
  233. wmi_mgmt_command_tx_cmp_log_buffer[WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY];
  234. /* wmi_mgmt events when received */
  235. uint32_t g_wmi_mgmt_rx_event_buf_idx = 0;
  236. struct wmi_event_debug
  237. wmi_mgmt_rx_event_log_buffer[WMI_MGMT_RX_DEBUG_MAX_ENTRY];
  238. /* wmi_diag events when received */
  239. uint32_t g_wmi_diag_rx_event_buf_idx = 0;
  240. struct wmi_event_debug
  241. wmi_diag_rx_event_log_buffer[WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY];
  242. #endif
  243. #define WMI_MGMT_COMMAND_RECORD(h, a, b) { \
  244. if (wmi_mgmt_tx_log_max_entry <= \
  245. *(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)) \
  246. *(h->log_info.wmi_mgmt_command_log_buf_info. \
  247. p_buf_tail_idx) = 0; \
  248. ((struct wmi_command_debug *)h->log_info. \
  249. wmi_mgmt_command_log_buf_info.buf) \
  250. [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
  251. command = a; \
  252. qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \
  253. wmi_mgmt_command_log_buf_info.buf) \
  254. [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
  255. data, b, \
  256. wmi_record_max_length); \
  257. ((struct wmi_command_debug *)h->log_info. \
  258. wmi_mgmt_command_log_buf_info.buf) \
  259. [*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx)].\
  260. time = qdf_get_log_timestamp(); \
  261. (*(h->log_info.wmi_mgmt_command_log_buf_info.p_buf_tail_idx))++;\
  262. h->log_info.wmi_mgmt_command_log_buf_info.length++; \
  263. }
  264. #define WMI_MGMT_COMMAND_TX_CMP_RECORD(h, a, b) { \
  265. if (wmi_mgmt_tx_cmpl_log_max_entry <= \
  266. *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \
  267. p_buf_tail_idx)) \
  268. *(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \
  269. p_buf_tail_idx) = 0; \
  270. ((struct wmi_command_debug *)h->log_info. \
  271. wmi_mgmt_command_tx_cmp_log_buf_info.buf) \
  272. [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \
  273. p_buf_tail_idx)].command = a; \
  274. qdf_mem_copy(((struct wmi_command_debug *)h->log_info. \
  275. wmi_mgmt_command_tx_cmp_log_buf_info.buf)\
  276. [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \
  277. p_buf_tail_idx)].data, b, \
  278. wmi_record_max_length); \
  279. ((struct wmi_command_debug *)h->log_info. \
  280. wmi_mgmt_command_tx_cmp_log_buf_info.buf) \
  281. [*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \
  282. p_buf_tail_idx)].time = \
  283. qdf_get_log_timestamp(); \
  284. (*(h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info. \
  285. p_buf_tail_idx))++; \
  286. h->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.length++; \
  287. }
  288. #define WMI_MGMT_RX_EVENT_RECORD(h, a, b) do { \
  289. if (wmi_mgmt_rx_log_max_entry <= \
  290. *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))\
  291. *(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx) = 0;\
  292. ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
  293. [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)]\
  294. .event = a; \
  295. qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \
  296. wmi_mgmt_event_log_buf_info.buf) \
  297. [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
  298. data, b, wmi_record_max_length); \
  299. ((struct wmi_event_debug *)h->log_info.wmi_mgmt_event_log_buf_info.buf)\
  300. [*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx)].\
  301. time = qdf_get_log_timestamp(); \
  302. (*(h->log_info.wmi_mgmt_event_log_buf_info.p_buf_tail_idx))++; \
  303. h->log_info.wmi_mgmt_event_log_buf_info.length++; \
  304. } while (0);
  305. #define WMI_DIAG_RX_EVENT_RECORD(h, a, b) do { \
  306. if (wmi_diag_log_max_entry <= \
  307. *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))\
  308. *(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx) = 0;\
  309. ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
  310. [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)]\
  311. .event = a; \
  312. qdf_mem_copy(((struct wmi_event_debug *)h->log_info. \
  313. wmi_diag_event_log_buf_info.buf) \
  314. [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
  315. data, b, wmi_record_max_length); \
  316. ((struct wmi_event_debug *)h->log_info.wmi_diag_event_log_buf_info.buf)\
  317. [*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx)].\
  318. time = qdf_get_log_timestamp(); \
  319. (*(h->log_info.wmi_diag_event_log_buf_info.p_buf_tail_idx))++; \
  320. h->log_info.wmi_diag_event_log_buf_info.length++; \
  321. } while (0);
  322. /* These are defined to made it as module param, which can be configured */
  323. /* WMI Commands */
  324. uint32_t wmi_cmd_log_max_entry = WMI_CMD_DEBUG_MAX_ENTRY;
  325. uint32_t wmi_cmd_cmpl_log_max_entry = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
  326. /* WMI Events */
  327. uint32_t wmi_event_log_max_entry = WMI_EVENT_DEBUG_MAX_ENTRY;
  328. /* WMI MGMT Tx */
  329. uint32_t wmi_mgmt_tx_log_max_entry = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
  330. uint32_t wmi_mgmt_tx_cmpl_log_max_entry = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
  331. /* WMI MGMT Rx */
  332. uint32_t wmi_mgmt_rx_log_max_entry = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
  333. /* WMI Diag Event */
  334. uint32_t wmi_diag_log_max_entry = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
  335. /* WMI capture size */
  336. uint32_t wmi_record_max_length = WMI_DEBUG_ENTRY_MAX_LENGTH;
  337. uint32_t wmi_display_size = 100;
  338. /**
  339. * wmi_log_init() - Initialize WMI event logging
  340. * @wmi_handle: WMI handle.
  341. *
  342. * Return: Initialization status
  343. */
  344. #ifndef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
  345. static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
  346. {
  347. struct wmi_log_buf_t *cmd_log_buf =
  348. &wmi_handle->log_info.wmi_command_log_buf_info;
  349. struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
  350. &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
  351. struct wmi_log_buf_t *event_log_buf =
  352. &wmi_handle->log_info.wmi_event_log_buf_info;
  353. struct wmi_log_buf_t *rx_event_log_buf =
  354. &wmi_handle->log_info.wmi_rx_event_log_buf_info;
  355. struct wmi_log_buf_t *mgmt_cmd_log_buf =
  356. &wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
  357. struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
  358. &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
  359. struct wmi_log_buf_t *mgmt_event_log_buf =
  360. &wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
  361. struct wmi_log_buf_t *diag_event_log_buf =
  362. &wmi_handle->log_info.wmi_diag_event_log_buf_info;
  363. /* WMI commands */
  364. cmd_log_buf->length = 0;
  365. cmd_log_buf->buf_tail_idx = 0;
  366. cmd_log_buf->buf = wmi_command_log_buffer;
  367. cmd_log_buf->p_buf_tail_idx = &g_wmi_command_buf_idx;
  368. cmd_log_buf->size = WMI_CMD_DEBUG_MAX_ENTRY;
  369. /* WMI commands TX completed */
  370. cmd_tx_cmpl_log_buf->length = 0;
  371. cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
  372. cmd_tx_cmpl_log_buf->buf = wmi_command_tx_cmp_log_buffer;
  373. cmd_tx_cmpl_log_buf->p_buf_tail_idx = &g_wmi_command_tx_cmp_buf_idx;
  374. cmd_tx_cmpl_log_buf->size = WMI_CMD_CMPL_DEBUG_MAX_ENTRY;
  375. /* WMI events when processed */
  376. event_log_buf->length = 0;
  377. event_log_buf->buf_tail_idx = 0;
  378. event_log_buf->buf = wmi_event_log_buffer;
  379. event_log_buf->p_buf_tail_idx = &g_wmi_event_buf_idx;
  380. event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
  381. /* WMI events when queued */
  382. rx_event_log_buf->length = 0;
  383. rx_event_log_buf->buf_tail_idx = 0;
  384. rx_event_log_buf->buf = wmi_rx_event_log_buffer;
  385. rx_event_log_buf->p_buf_tail_idx = &g_wmi_rx_event_buf_idx;
  386. rx_event_log_buf->size = WMI_EVENT_DEBUG_MAX_ENTRY;
  387. /* WMI Management commands */
  388. mgmt_cmd_log_buf->length = 0;
  389. mgmt_cmd_log_buf->buf_tail_idx = 0;
  390. mgmt_cmd_log_buf->buf = wmi_mgmt_command_log_buffer;
  391. mgmt_cmd_log_buf->p_buf_tail_idx = &g_wmi_mgmt_command_buf_idx;
  392. mgmt_cmd_log_buf->size = WMI_MGMT_TX_DEBUG_MAX_ENTRY;
  393. /* WMI Management commands Tx completed*/
  394. mgmt_cmd_tx_cmp_log_buf->length = 0;
  395. mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
  396. mgmt_cmd_tx_cmp_log_buf->buf = wmi_mgmt_command_tx_cmp_log_buffer;
  397. mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
  398. &g_wmi_mgmt_command_tx_cmp_buf_idx;
  399. mgmt_cmd_tx_cmp_log_buf->size = WMI_MGMT_TX_CMPL_DEBUG_MAX_ENTRY;
  400. /* WMI Management events when received */
  401. mgmt_event_log_buf->length = 0;
  402. mgmt_event_log_buf->buf_tail_idx = 0;
  403. mgmt_event_log_buf->buf = wmi_mgmt_rx_event_log_buffer;
  404. mgmt_event_log_buf->p_buf_tail_idx = &g_wmi_mgmt_rx_event_buf_idx;
  405. mgmt_event_log_buf->size = WMI_MGMT_RX_DEBUG_MAX_ENTRY;
  406. /* WMI diag events when received */
  407. diag_event_log_buf->length = 0;
  408. diag_event_log_buf->buf_tail_idx = 0;
  409. diag_event_log_buf->buf = wmi_diag_rx_event_log_buffer;
  410. diag_event_log_buf->p_buf_tail_idx = &g_wmi_diag_rx_event_buf_idx;
  411. diag_event_log_buf->size = WMI_DIAG_RX_EVENT_DEBUG_MAX_ENTRY;
  412. qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
  413. wmi_handle->log_info.wmi_logging_enable = 1;
  414. return QDF_STATUS_SUCCESS;
  415. }
  416. #else
  417. static QDF_STATUS wmi_log_init(struct wmi_unified *wmi_handle)
  418. {
  419. struct wmi_log_buf_t *cmd_log_buf =
  420. &wmi_handle->log_info.wmi_command_log_buf_info;
  421. struct wmi_log_buf_t *cmd_tx_cmpl_log_buf =
  422. &wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info;
  423. struct wmi_log_buf_t *event_log_buf =
  424. &wmi_handle->log_info.wmi_event_log_buf_info;
  425. struct wmi_log_buf_t *rx_event_log_buf =
  426. &wmi_handle->log_info.wmi_rx_event_log_buf_info;
  427. struct wmi_log_buf_t *mgmt_cmd_log_buf =
  428. &wmi_handle->log_info.wmi_mgmt_command_log_buf_info;
  429. struct wmi_log_buf_t *mgmt_cmd_tx_cmp_log_buf =
  430. &wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info;
  431. struct wmi_log_buf_t *mgmt_event_log_buf =
  432. &wmi_handle->log_info.wmi_mgmt_event_log_buf_info;
  433. struct wmi_log_buf_t *diag_event_log_buf =
  434. &wmi_handle->log_info.wmi_diag_event_log_buf_info;
  435. wmi_handle->log_info.wmi_logging_enable = 0;
  436. /* WMI commands */
  437. cmd_log_buf->length = 0;
  438. cmd_log_buf->buf_tail_idx = 0;
  439. cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
  440. wmi_cmd_log_max_entry * sizeof(struct wmi_command_debug));
  441. cmd_log_buf->size = wmi_cmd_log_max_entry;
  442. if (!cmd_log_buf->buf)
  443. return QDF_STATUS_E_NOMEM;
  444. cmd_log_buf->p_buf_tail_idx = &cmd_log_buf->buf_tail_idx;
  445. /* WMI commands TX completed */
  446. cmd_tx_cmpl_log_buf->length = 0;
  447. cmd_tx_cmpl_log_buf->buf_tail_idx = 0;
  448. cmd_tx_cmpl_log_buf->buf = (struct wmi_command_cmp_debug *) qdf_mem_malloc(
  449. wmi_cmd_cmpl_log_max_entry * sizeof(struct wmi_command_cmp_debug));
  450. cmd_tx_cmpl_log_buf->size = wmi_cmd_cmpl_log_max_entry;
  451. if (!cmd_tx_cmpl_log_buf->buf)
  452. return QDF_STATUS_E_NOMEM;
  453. cmd_tx_cmpl_log_buf->p_buf_tail_idx =
  454. &cmd_tx_cmpl_log_buf->buf_tail_idx;
  455. /* WMI events when processed */
  456. event_log_buf->length = 0;
  457. event_log_buf->buf_tail_idx = 0;
  458. event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
  459. wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
  460. event_log_buf->size = wmi_event_log_max_entry;
  461. if (!event_log_buf->buf)
  462. return QDF_STATUS_E_NOMEM;
  463. event_log_buf->p_buf_tail_idx = &event_log_buf->buf_tail_idx;
  464. /* WMI events when queued */
  465. rx_event_log_buf->length = 0;
  466. rx_event_log_buf->buf_tail_idx = 0;
  467. rx_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
  468. wmi_event_log_max_entry * sizeof(struct wmi_event_debug));
  469. rx_event_log_buf->size = wmi_event_log_max_entry;
  470. if (!rx_event_log_buf->buf)
  471. return QDF_STATUS_E_NOMEM;
  472. rx_event_log_buf->p_buf_tail_idx = &rx_event_log_buf->buf_tail_idx;
  473. /* WMI Management commands */
  474. mgmt_cmd_log_buf->length = 0;
  475. mgmt_cmd_log_buf->buf_tail_idx = 0;
  476. mgmt_cmd_log_buf->buf = (struct wmi_command_debug *) qdf_mem_malloc(
  477. wmi_mgmt_tx_log_max_entry * sizeof(struct wmi_command_debug));
  478. mgmt_cmd_log_buf->size = wmi_mgmt_tx_log_max_entry;
  479. if (!mgmt_cmd_log_buf->buf)
  480. return QDF_STATUS_E_NOMEM;
  481. mgmt_cmd_log_buf->p_buf_tail_idx = &mgmt_cmd_log_buf->buf_tail_idx;
  482. /* WMI Management commands Tx completed*/
  483. mgmt_cmd_tx_cmp_log_buf->length = 0;
  484. mgmt_cmd_tx_cmp_log_buf->buf_tail_idx = 0;
  485. mgmt_cmd_tx_cmp_log_buf->buf = (struct wmi_command_debug *)
  486. qdf_mem_malloc(
  487. wmi_mgmt_tx_cmpl_log_max_entry *
  488. sizeof(struct wmi_command_debug));
  489. mgmt_cmd_tx_cmp_log_buf->size = wmi_mgmt_tx_cmpl_log_max_entry;
  490. if (!mgmt_cmd_tx_cmp_log_buf->buf)
  491. return QDF_STATUS_E_NOMEM;
  492. mgmt_cmd_tx_cmp_log_buf->p_buf_tail_idx =
  493. &mgmt_cmd_tx_cmp_log_buf->buf_tail_idx;
  494. /* WMI Management events when received */
  495. mgmt_event_log_buf->length = 0;
  496. mgmt_event_log_buf->buf_tail_idx = 0;
  497. mgmt_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
  498. wmi_mgmt_rx_log_max_entry *
  499. sizeof(struct wmi_event_debug));
  500. mgmt_event_log_buf->size = wmi_mgmt_rx_log_max_entry;
  501. if (!mgmt_event_log_buf->buf)
  502. return QDF_STATUS_E_NOMEM;
  503. mgmt_event_log_buf->p_buf_tail_idx = &mgmt_event_log_buf->buf_tail_idx;
  504. /* WMI diag events when received */
  505. diag_event_log_buf->length = 0;
  506. diag_event_log_buf->buf_tail_idx = 0;
  507. diag_event_log_buf->buf = (struct wmi_event_debug *) qdf_mem_malloc(
  508. wmi_diag_log_max_entry *
  509. sizeof(struct wmi_event_debug));
  510. diag_event_log_buf->size = wmi_diag_log_max_entry;
  511. if (!diag_event_log_buf->buf)
  512. return QDF_STATUS_E_NOMEM;
  513. diag_event_log_buf->p_buf_tail_idx = &diag_event_log_buf->buf_tail_idx;
  514. qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
  515. wmi_handle->log_info.wmi_logging_enable = 1;
  516. wmi_filtered_logging_init(wmi_handle);
  517. return QDF_STATUS_SUCCESS;
  518. }
  519. #endif
  520. /**
  521. * wmi_log_buffer_free() - Free all dynamic allocated buffer memory for
  522. * event logging
  523. * @wmi_handle: WMI handle.
  524. *
  525. * Return: None
  526. */
  527. #ifdef WMI_INTERFACE_EVENT_LOGGING_DYNAMIC_ALLOC
  528. static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
  529. {
  530. wmi_filtered_logging_free(wmi_handle);
  531. if (wmi_handle->log_info.wmi_command_log_buf_info.buf)
  532. qdf_mem_free(wmi_handle->log_info.wmi_command_log_buf_info.buf);
  533. if (wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf)
  534. qdf_mem_free(
  535. wmi_handle->log_info.wmi_command_tx_cmp_log_buf_info.buf);
  536. if (wmi_handle->log_info.wmi_event_log_buf_info.buf)
  537. qdf_mem_free(wmi_handle->log_info.wmi_event_log_buf_info.buf);
  538. if (wmi_handle->log_info.wmi_rx_event_log_buf_info.buf)
  539. qdf_mem_free(
  540. wmi_handle->log_info.wmi_rx_event_log_buf_info.buf);
  541. if (wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf)
  542. qdf_mem_free(
  543. wmi_handle->log_info.wmi_mgmt_command_log_buf_info.buf);
  544. if (wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf)
  545. qdf_mem_free(
  546. wmi_handle->log_info.wmi_mgmt_command_tx_cmp_log_buf_info.buf);
  547. if (wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf)
  548. qdf_mem_free(
  549. wmi_handle->log_info.wmi_mgmt_event_log_buf_info.buf);
  550. if (wmi_handle->log_info.wmi_diag_event_log_buf_info.buf)
  551. qdf_mem_free(
  552. wmi_handle->log_info.wmi_diag_event_log_buf_info.buf);
  553. wmi_handle->log_info.wmi_logging_enable = 0;
  554. qdf_spinlock_destroy(&wmi_handle->log_info.wmi_record_lock);
  555. }
  556. #else
  557. static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle)
  558. {
  559. /* Do Nothing */
  560. }
  561. #endif
  562. /**
  563. * wmi_print_cmd_log_buffer() - an output agnostic wmi command log printer
  564. * @log_buffer: the command log buffer metadata of the buffer to print
  565. * @count: the maximum number of entries to print
  566. * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
  567. * @print_priv: any data required by the print method, e.g. a file handle
  568. *
  569. * Return: None
  570. */
  571. static void
  572. wmi_print_cmd_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
  573. qdf_abstract_print *print, void *print_priv)
  574. {
  575. static const int data_len =
  576. WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
  577. char str[128];
  578. uint32_t idx;
  579. if (count > log_buffer->size)
  580. count = log_buffer->size;
  581. if (count > log_buffer->length)
  582. count = log_buffer->length;
  583. /* subtract count from index, and wrap if necessary */
  584. idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
  585. idx %= log_buffer->size;
  586. print(print_priv, "Time (seconds) Cmd Id Payload");
  587. while (count) {
  588. struct wmi_command_debug *cmd_log = (struct wmi_command_debug *)
  589. &((struct wmi_command_debug *)log_buffer->buf)[idx];
  590. uint64_t secs, usecs;
  591. int len = 0;
  592. int i;
  593. qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
  594. len += scnprintf(str + len, sizeof(str) - len,
  595. "% 8lld.%06lld %6u (0x%06x) ",
  596. secs, usecs,
  597. cmd_log->command, cmd_log->command);
  598. for (i = 0; i < data_len; ++i) {
  599. len += scnprintf(str + len, sizeof(str) - len,
  600. "0x%08x ", cmd_log->data[i]);
  601. }
  602. print(print_priv, str);
  603. --count;
  604. ++idx;
  605. if (idx >= log_buffer->size)
  606. idx = 0;
  607. }
  608. }
  609. /**
  610. * wmi_print_cmd_cmp_log_buffer() - wmi command completion log printer
  611. * @log_buffer: the command completion log buffer metadata of the buffer to print
  612. * @count: the maximum number of entries to print
  613. * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
  614. * @print_priv: any data required by the print method, e.g. a file handle
  615. *
  616. * Return: None
  617. */
  618. static void
  619. wmi_print_cmd_cmp_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
  620. qdf_abstract_print *print, void *print_priv)
  621. {
  622. static const int data_len =
  623. WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
  624. char str[128];
  625. uint32_t idx;
  626. if (count > log_buffer->size)
  627. count = log_buffer->size;
  628. if (count > log_buffer->length)
  629. count = log_buffer->length;
  630. /* subtract count from index, and wrap if necessary */
  631. idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
  632. idx %= log_buffer->size;
  633. print(print_priv, "Time (seconds) Cmd Id Payload");
  634. while (count) {
  635. struct wmi_command_cmp_debug *cmd_log = (struct wmi_command_cmp_debug *)
  636. &((struct wmi_command_cmp_debug *)log_buffer->buf)[idx];
  637. uint64_t secs, usecs;
  638. int len = 0;
  639. int i;
  640. qdf_log_timestamp_to_secs(cmd_log->time, &secs, &usecs);
  641. len += scnprintf(str + len, sizeof(str) - len,
  642. "% 8lld.%06lld %6u (0x%06x) ",
  643. secs, usecs,
  644. cmd_log->command, cmd_log->command);
  645. for (i = 0; i < data_len; ++i) {
  646. len += scnprintf(str + len, sizeof(str) - len,
  647. "0x%08x ", cmd_log->data[i]);
  648. }
  649. print(print_priv, str);
  650. --count;
  651. ++idx;
  652. if (idx >= log_buffer->size)
  653. idx = 0;
  654. }
  655. }
  656. /**
  657. * wmi_print_event_log_buffer() - an output agnostic wmi event log printer
  658. * @log_buffer: the event log buffer metadata of the buffer to print
  659. * @count: the maximum number of entries to print
  660. * @print: an abstract print method, e.g. a qdf_print() or seq_printf() wrapper
  661. * @print_priv: any data required by the print method, e.g. a file handle
  662. *
  663. * Return: None
  664. */
  665. static void
  666. wmi_print_event_log_buffer(struct wmi_log_buf_t *log_buffer, uint32_t count,
  667. qdf_abstract_print *print, void *print_priv)
  668. {
  669. static const int data_len =
  670. WMI_DEBUG_ENTRY_MAX_LENGTH / sizeof(uint32_t);
  671. char str[128];
  672. uint32_t idx;
  673. if (count > log_buffer->size)
  674. count = log_buffer->size;
  675. if (count > log_buffer->length)
  676. count = log_buffer->length;
  677. /* subtract count from index, and wrap if necessary */
  678. idx = log_buffer->size + *log_buffer->p_buf_tail_idx - count;
  679. idx %= log_buffer->size;
  680. print(print_priv, "Time (seconds) Event Id Payload");
  681. while (count) {
  682. struct wmi_event_debug *event_log = (struct wmi_event_debug *)
  683. &((struct wmi_event_debug *)log_buffer->buf)[idx];
  684. uint64_t secs, usecs;
  685. int len = 0;
  686. int i;
  687. qdf_log_timestamp_to_secs(event_log->time, &secs, &usecs);
  688. len += scnprintf(str + len, sizeof(str) - len,
  689. "% 8lld.%06lld %6u (0x%06x) ",
  690. secs, usecs,
  691. event_log->event, event_log->event);
  692. for (i = 0; i < data_len; ++i) {
  693. len += scnprintf(str + len, sizeof(str) - len,
  694. "0x%08x ", event_log->data[i]);
  695. }
  696. print(print_priv, str);
  697. --count;
  698. ++idx;
  699. if (idx >= log_buffer->size)
  700. idx = 0;
  701. }
  702. }
  703. inline void
  704. wmi_print_cmd_log(wmi_unified_t wmi, uint32_t count,
  705. qdf_abstract_print *print, void *print_priv)
  706. {
  707. wmi_print_cmd_log_buffer(
  708. &wmi->log_info.wmi_command_log_buf_info,
  709. count, print, print_priv);
  710. }
  711. inline void
  712. wmi_print_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
  713. qdf_abstract_print *print, void *print_priv)
  714. {
  715. wmi_print_cmd_cmp_log_buffer(
  716. &wmi->log_info.wmi_command_tx_cmp_log_buf_info,
  717. count, print, print_priv);
  718. }
  719. inline void
  720. wmi_print_mgmt_cmd_log(wmi_unified_t wmi, uint32_t count,
  721. qdf_abstract_print *print, void *print_priv)
  722. {
  723. wmi_print_cmd_log_buffer(
  724. &wmi->log_info.wmi_mgmt_command_log_buf_info,
  725. count, print, print_priv);
  726. }
  727. inline void
  728. wmi_print_mgmt_cmd_tx_cmp_log(wmi_unified_t wmi, uint32_t count,
  729. qdf_abstract_print *print, void *print_priv)
  730. {
  731. wmi_print_cmd_log_buffer(
  732. &wmi->log_info.wmi_mgmt_command_tx_cmp_log_buf_info,
  733. count, print, print_priv);
  734. }
  735. inline void
  736. wmi_print_event_log(wmi_unified_t wmi, uint32_t count,
  737. qdf_abstract_print *print, void *print_priv)
  738. {
  739. wmi_print_event_log_buffer(
  740. &wmi->log_info.wmi_event_log_buf_info,
  741. count, print, print_priv);
  742. }
  743. inline void
  744. wmi_print_rx_event_log(wmi_unified_t wmi, uint32_t count,
  745. qdf_abstract_print *print, void *print_priv)
  746. {
  747. wmi_print_event_log_buffer(
  748. &wmi->log_info.wmi_rx_event_log_buf_info,
  749. count, print, print_priv);
  750. }
  751. inline void
  752. wmi_print_mgmt_event_log(wmi_unified_t wmi, uint32_t count,
  753. qdf_abstract_print *print, void *print_priv)
  754. {
  755. wmi_print_event_log_buffer(
  756. &wmi->log_info.wmi_mgmt_event_log_buf_info,
  757. count, print, print_priv);
  758. }
  759. /* debugfs routines*/
  760. /**
  761. * debug_wmi_##func_base##_show() - debugfs functions to display content of
  762. * command and event buffers. Macro uses max buffer length to display
  763. * buffer when it is wraparound.
  764. *
  765. * @m: debugfs handler to access wmi_handle
  766. * @v: Variable arguments (not used)
  767. *
  768. * Return: Length of characters printed
  769. */
  770. #define GENERATE_COMMAND_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
  771. static int debug_wmi_##func_base##_show(struct seq_file *m, \
  772. void *v) \
  773. { \
  774. wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \
  775. struct wmi_log_buf_t *wmi_log = \
  776. &wmi_handle->log_info.wmi_##func_base##_buf_info;\
  777. int pos, nread, outlen; \
  778. int i; \
  779. uint64_t secs, usecs; \
  780. \
  781. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
  782. if (!wmi_log->length) { \
  783. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
  784. return wmi_bp_seq_printf(m, \
  785. "no elements to read from ring buffer!\n"); \
  786. } \
  787. \
  788. if (wmi_log->length <= wmi_ring_size) \
  789. nread = wmi_log->length; \
  790. else \
  791. nread = wmi_ring_size; \
  792. \
  793. if (*(wmi_log->p_buf_tail_idx) == 0) \
  794. /* tail can be 0 after wrap-around */ \
  795. pos = wmi_ring_size - 1; \
  796. else \
  797. pos = *(wmi_log->p_buf_tail_idx) - 1; \
  798. \
  799. outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
  800. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
  801. while (nread--) { \
  802. struct wmi_record_type *wmi_record; \
  803. \
  804. wmi_record = (struct wmi_record_type *) \
  805. &(((struct wmi_record_type *)wmi_log->buf)[pos]);\
  806. outlen += wmi_bp_seq_printf(m, "CMD ID = %x\n", \
  807. (wmi_record->command)); \
  808. qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
  809. &usecs); \
  810. outlen += \
  811. wmi_bp_seq_printf(m, "CMD TIME = [%llu.%06llu]\n",\
  812. secs, usecs); \
  813. outlen += wmi_bp_seq_printf(m, "CMD = "); \
  814. for (i = 0; i < (wmi_record_max_length/ \
  815. sizeof(uint32_t)); i++) \
  816. outlen += wmi_bp_seq_printf(m, "%x ", \
  817. wmi_record->data[i]); \
  818. outlen += wmi_bp_seq_printf(m, "\n"); \
  819. \
  820. if (pos == 0) \
  821. pos = wmi_ring_size - 1; \
  822. else \
  823. pos--; \
  824. } \
  825. return outlen; \
  826. } \
  827. #define GENERATE_EVENT_DEBUG_SHOW_FUNCS(func_base, wmi_ring_size) \
  828. static int debug_wmi_##func_base##_show(struct seq_file *m, \
  829. void *v) \
  830. { \
  831. wmi_unified_t wmi_handle = (wmi_unified_t) m->private; \
  832. struct wmi_log_buf_t *wmi_log = \
  833. &wmi_handle->log_info.wmi_##func_base##_buf_info;\
  834. int pos, nread, outlen; \
  835. int i; \
  836. uint64_t secs, usecs; \
  837. \
  838. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
  839. if (!wmi_log->length) { \
  840. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
  841. return wmi_bp_seq_printf(m, \
  842. "no elements to read from ring buffer!\n"); \
  843. } \
  844. \
  845. if (wmi_log->length <= wmi_ring_size) \
  846. nread = wmi_log->length; \
  847. else \
  848. nread = wmi_ring_size; \
  849. \
  850. if (*(wmi_log->p_buf_tail_idx) == 0) \
  851. /* tail can be 0 after wrap-around */ \
  852. pos = wmi_ring_size - 1; \
  853. else \
  854. pos = *(wmi_log->p_buf_tail_idx) - 1; \
  855. \
  856. outlen = wmi_bp_seq_printf(m, "Length = %d\n", wmi_log->length);\
  857. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
  858. while (nread--) { \
  859. struct wmi_event_debug *wmi_record; \
  860. \
  861. wmi_record = (struct wmi_event_debug *) \
  862. &(((struct wmi_event_debug *)wmi_log->buf)[pos]);\
  863. qdf_log_timestamp_to_secs(wmi_record->time, &secs,\
  864. &usecs); \
  865. outlen += wmi_bp_seq_printf(m, "Event ID = %x\n",\
  866. (wmi_record->event)); \
  867. outlen += \
  868. wmi_bp_seq_printf(m, "Event TIME = [%llu.%06llu]\n",\
  869. secs, usecs); \
  870. outlen += wmi_bp_seq_printf(m, "CMD = "); \
  871. for (i = 0; i < (wmi_record_max_length/ \
  872. sizeof(uint32_t)); i++) \
  873. outlen += wmi_bp_seq_printf(m, "%x ", \
  874. wmi_record->data[i]); \
  875. outlen += wmi_bp_seq_printf(m, "\n"); \
  876. \
  877. if (pos == 0) \
  878. pos = wmi_ring_size - 1; \
  879. else \
  880. pos--; \
  881. } \
  882. return outlen; \
  883. }
  884. GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_log, wmi_display_size,
  885. wmi_command_debug);
  886. GENERATE_COMMAND_DEBUG_SHOW_FUNCS(command_tx_cmp_log, wmi_display_size,
  887. wmi_command_cmp_debug);
  888. GENERATE_EVENT_DEBUG_SHOW_FUNCS(event_log, wmi_display_size);
  889. GENERATE_EVENT_DEBUG_SHOW_FUNCS(rx_event_log, wmi_display_size);
  890. GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_log, wmi_display_size,
  891. wmi_command_debug);
  892. GENERATE_COMMAND_DEBUG_SHOW_FUNCS(mgmt_command_tx_cmp_log,
  893. wmi_display_size,
  894. wmi_command_debug);
  895. GENERATE_EVENT_DEBUG_SHOW_FUNCS(mgmt_event_log, wmi_display_size);
  896. /**
  897. * debug_wmi_enable_show() - debugfs functions to display enable state of
  898. * wmi logging feature.
  899. *
  900. * @m: debugfs handler to access wmi_handle
  901. * @v: Variable arguments (not used)
  902. *
  903. * Return: always 1
  904. */
  905. static int debug_wmi_enable_show(struct seq_file *m, void *v)
  906. {
  907. wmi_unified_t wmi_handle = (wmi_unified_t) m->private;
  908. return wmi_bp_seq_printf(m, "%d\n",
  909. wmi_handle->log_info.wmi_logging_enable);
  910. }
  911. /**
  912. * debug_wmi_log_size_show() - debugfs functions to display configured size of
  913. * wmi logging command/event buffer and management command/event buffer.
  914. *
  915. * @m: debugfs handler to access wmi_handle
  916. * @v: Variable arguments (not used)
  917. *
  918. * Return: Length of characters printed
  919. */
  920. static int debug_wmi_log_size_show(struct seq_file *m, void *v)
  921. {
  922. wmi_bp_seq_printf(m, "WMI command/cmpl log max size:%d/%d\n",
  923. wmi_cmd_log_max_entry, wmi_cmd_cmpl_log_max_entry);
  924. wmi_bp_seq_printf(m, "WMI management Tx/cmpl log max size:%d/%d\n",
  925. wmi_mgmt_tx_log_max_entry,
  926. wmi_mgmt_tx_cmpl_log_max_entry);
  927. wmi_bp_seq_printf(m, "WMI event log max size:%d\n",
  928. wmi_event_log_max_entry);
  929. wmi_bp_seq_printf(m, "WMI management Rx log max size:%d\n",
  930. wmi_mgmt_rx_log_max_entry);
  931. return wmi_bp_seq_printf(m,
  932. "WMI diag log max size:%d\n",
  933. wmi_diag_log_max_entry);
  934. }
  935. /**
  936. * debug_wmi_##func_base##_write() - debugfs functions to clear
  937. * wmi logging command/event buffer and management command/event buffer.
  938. *
  939. * @file: file handler to access wmi_handle
  940. * @buf: received data buffer
  941. * @count: length of received buffer
  942. * @ppos: Not used
  943. *
  944. * Return: count
  945. */
  946. #define GENERATE_DEBUG_WRITE_FUNCS(func_base, wmi_ring_size, wmi_record_type)\
  947. static ssize_t debug_wmi_##func_base##_write(struct file *file, \
  948. const char __user *buf, \
  949. size_t count, loff_t *ppos) \
  950. { \
  951. int k, ret; \
  952. wmi_unified_t wmi_handle = \
  953. ((struct seq_file *)file->private_data)->private;\
  954. struct wmi_log_buf_t *wmi_log = &wmi_handle->log_info. \
  955. wmi_##func_base##_buf_info; \
  956. char locbuf[50]; \
  957. \
  958. if ((!buf) || (count > 50)) \
  959. return -EFAULT; \
  960. \
  961. if (copy_from_user(locbuf, buf, count)) \
  962. return -EFAULT; \
  963. \
  964. ret = sscanf(locbuf, "%d", &k); \
  965. if ((ret != 1) || (k != 0)) { \
  966. wmi_err("Wrong input, echo 0 to clear the wmi buffer");\
  967. return -EINVAL; \
  968. } \
  969. \
  970. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);\
  971. qdf_mem_zero(wmi_log->buf, wmi_ring_size * \
  972. sizeof(struct wmi_record_type)); \
  973. wmi_log->length = 0; \
  974. *(wmi_log->p_buf_tail_idx) = 0; \
  975. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);\
  976. \
  977. return count; \
  978. }
  979. GENERATE_DEBUG_WRITE_FUNCS(command_log, wmi_cmd_log_max_entry,
  980. wmi_command_debug);
  981. GENERATE_DEBUG_WRITE_FUNCS(command_tx_cmp_log, wmi_cmd_cmpl_log_max_entry,
  982. wmi_command_cmp_debug);
  983. GENERATE_DEBUG_WRITE_FUNCS(event_log, wmi_event_log_max_entry,
  984. wmi_event_debug);
  985. GENERATE_DEBUG_WRITE_FUNCS(rx_event_log, wmi_event_log_max_entry,
  986. wmi_event_debug);
  987. GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_log, wmi_mgmt_tx_log_max_entry,
  988. wmi_command_debug);
  989. GENERATE_DEBUG_WRITE_FUNCS(mgmt_command_tx_cmp_log,
  990. wmi_mgmt_tx_cmpl_log_max_entry, wmi_command_debug);
  991. GENERATE_DEBUG_WRITE_FUNCS(mgmt_event_log, wmi_mgmt_rx_log_max_entry,
  992. wmi_event_debug);
  993. /**
  994. * debug_wmi_enable_write() - debugfs functions to enable/disable
  995. * wmi logging feature.
  996. *
  997. * @file: file handler to access wmi_handle
  998. * @buf: received data buffer
  999. * @count: length of received buffer
  1000. * @ppos: Not used
  1001. *
  1002. * Return: count
  1003. */
  1004. static ssize_t debug_wmi_enable_write(struct file *file, const char __user *buf,
  1005. size_t count, loff_t *ppos)
  1006. {
  1007. wmi_unified_t wmi_handle =
  1008. ((struct seq_file *)file->private_data)->private;
  1009. int k, ret;
  1010. char locbuf[50];
  1011. if ((!buf) || (count > 50))
  1012. return -EFAULT;
  1013. if (copy_from_user(locbuf, buf, count))
  1014. return -EFAULT;
  1015. ret = sscanf(locbuf, "%d", &k);
  1016. if ((ret != 1) || ((k != 0) && (k != 1)))
  1017. return -EINVAL;
  1018. wmi_handle->log_info.wmi_logging_enable = k;
  1019. return count;
  1020. }
  1021. /**
  1022. * debug_wmi_log_size_write() - reserved.
  1023. *
  1024. * @file: file handler to access wmi_handle
  1025. * @buf: received data buffer
  1026. * @count: length of received buffer
  1027. * @ppos: Not used
  1028. *
  1029. * Return: count
  1030. */
  1031. static ssize_t debug_wmi_log_size_write(struct file *file,
  1032. const char __user *buf, size_t count, loff_t *ppos)
  1033. {
  1034. return -EINVAL;
  1035. }
  1036. /* Structure to maintain debug information */
  1037. struct wmi_debugfs_info {
  1038. const char *name;
  1039. const struct file_operations *ops;
  1040. };
  1041. #define DEBUG_FOO(func_base) { .name = #func_base, \
  1042. .ops = &debug_##func_base##_ops }
  1043. /**
  1044. * debug_##func_base##_open() - Open debugfs entry for respective command
  1045. * and event buffer.
  1046. *
  1047. * @inode: node for debug dir entry
  1048. * @file: file handler
  1049. *
  1050. * Return: open status
  1051. */
  1052. #define GENERATE_DEBUG_STRUCTS(func_base) \
  1053. static int debug_##func_base##_open(struct inode *inode, \
  1054. struct file *file) \
  1055. { \
  1056. return single_open(file, debug_##func_base##_show, \
  1057. inode->i_private); \
  1058. } \
  1059. \
  1060. \
  1061. static struct file_operations debug_##func_base##_ops = { \
  1062. .open = debug_##func_base##_open, \
  1063. .read = seq_read, \
  1064. .llseek = seq_lseek, \
  1065. .write = debug_##func_base##_write, \
  1066. .release = single_release, \
  1067. };
  1068. GENERATE_DEBUG_STRUCTS(wmi_command_log);
  1069. GENERATE_DEBUG_STRUCTS(wmi_command_tx_cmp_log);
  1070. GENERATE_DEBUG_STRUCTS(wmi_event_log);
  1071. GENERATE_DEBUG_STRUCTS(wmi_rx_event_log);
  1072. GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_log);
  1073. GENERATE_DEBUG_STRUCTS(wmi_mgmt_command_tx_cmp_log);
  1074. GENERATE_DEBUG_STRUCTS(wmi_mgmt_event_log);
  1075. GENERATE_DEBUG_STRUCTS(wmi_enable);
  1076. GENERATE_DEBUG_STRUCTS(wmi_log_size);
  1077. #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
  1078. GENERATE_DEBUG_STRUCTS(filtered_wmi_cmds);
  1079. GENERATE_DEBUG_STRUCTS(filtered_wmi_evts);
  1080. GENERATE_DEBUG_STRUCTS(wmi_filtered_command_log);
  1081. GENERATE_DEBUG_STRUCTS(wmi_filtered_event_log);
  1082. #endif
  1083. struct wmi_debugfs_info wmi_debugfs_infos[NUM_DEBUG_INFOS] = {
  1084. DEBUG_FOO(wmi_command_log),
  1085. DEBUG_FOO(wmi_command_tx_cmp_log),
  1086. DEBUG_FOO(wmi_event_log),
  1087. DEBUG_FOO(wmi_rx_event_log),
  1088. DEBUG_FOO(wmi_mgmt_command_log),
  1089. DEBUG_FOO(wmi_mgmt_command_tx_cmp_log),
  1090. DEBUG_FOO(wmi_mgmt_event_log),
  1091. DEBUG_FOO(wmi_enable),
  1092. DEBUG_FOO(wmi_log_size),
  1093. #ifdef WMI_INTERFACE_FILTERED_EVENT_LOGGING
  1094. DEBUG_FOO(filtered_wmi_cmds),
  1095. DEBUG_FOO(filtered_wmi_evts),
  1096. DEBUG_FOO(wmi_filtered_command_log),
  1097. DEBUG_FOO(wmi_filtered_event_log),
  1098. #endif
  1099. };
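/*
 * Illustrative note (added, not part of the original driver):
 * wmi_debugfs_init() below creates one directory per pdev, named
 * "WMI_SOC<soc>_PDEV<pdev>", and wmi_debugfs_create() populates it with one
 * file per wmi_debugfs_infos[] entry (wmi_command_log, wmi_event_log,
 * wmi_enable, wmi_log_size, ...), each backed by the seq_file ops generated
 * by GENERATE_DEBUG_STRUCTS() above.
 */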
  1100. /**
1101. * wmi_debugfs_create() - Create debugfs entries for wmi logging.
1102. *
1103. * @wmi_handle: wmi handle
1104. * @par_entry: debug directory entry
  1106. *
  1107. * Return: none
  1108. */
  1109. static void wmi_debugfs_create(wmi_unified_t wmi_handle,
  1110. struct dentry *par_entry)
  1111. {
  1112. int i;
  1113. if (!par_entry)
  1114. goto out;
  1115. for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
  1116. wmi_handle->debugfs_de[i] = qdf_debugfs_create_entry(
  1117. wmi_debugfs_infos[i].name,
  1118. WMI_INFOS_DBG_FILE_PERM,
  1119. par_entry,
  1120. wmi_handle,
  1121. wmi_debugfs_infos[i].ops);
  1122. if (!wmi_handle->debugfs_de[i]) {
  1123. wmi_err("debug Entry creation failed!");
  1124. goto out;
  1125. }
  1126. }
  1127. return;
  1128. out:
  1129. wmi_err("debug Entry creation failed!");
  1130. wmi_log_buffer_free(wmi_handle);
  1131. return;
  1132. }
  1133. /**
1134. * wmi_debugfs_remove() - Remove debugfs entries for wmi logging.
1135. * @wmi_handle: wmi handle
  1138. *
  1139. * Return: none
  1140. */
  1141. static void wmi_debugfs_remove(wmi_unified_t wmi_handle)
  1142. {
  1143. int i;
  1144. struct dentry *dentry = wmi_handle->log_info.wmi_log_debugfs_dir;
  1145. if (dentry) {
  1146. for (i = 0; i < NUM_DEBUG_INFOS; ++i) {
  1147. if (wmi_handle->debugfs_de[i])
  1148. wmi_handle->debugfs_de[i] = NULL;
  1149. }
  1150. }
  1151. if (dentry)
  1152. qdf_debugfs_remove_dir_recursive(dentry);
  1153. }
  1154. /**
1155. * wmi_debugfs_init() - create debugfs directory and entries for wmi logging.
1156. *
1157. * @wmi_handle: wmi handle
1158. * @pdev_idx: pdev index
  1159. *
  1160. * Return: init status
  1161. */
  1162. static QDF_STATUS wmi_debugfs_init(wmi_unified_t wmi_handle, uint32_t pdev_idx)
  1163. {
  1164. char buf[32];
  1165. snprintf(buf, sizeof(buf), "WMI_SOC%u_PDEV%u",
  1166. wmi_handle->soc->soc_idx, pdev_idx);
  1167. wmi_handle->log_info.wmi_log_debugfs_dir =
  1168. qdf_debugfs_create_dir(buf, NULL);
  1169. if (!wmi_handle->log_info.wmi_log_debugfs_dir) {
  1170. wmi_err("error while creating debugfs dir for %s", buf);
  1171. return QDF_STATUS_E_FAILURE;
  1172. }
  1173. wmi_debugfs_create(wmi_handle,
  1174. wmi_handle->log_info.wmi_log_debugfs_dir);
  1175. return QDF_STATUS_SUCCESS;
  1176. }
  1177. /**
  1178. * wmi_mgmt_cmd_record() - Wrapper function for mgmt command logging macro
  1179. *
  1180. * @wmi_handle: wmi handle
  1181. * @cmd: mgmt command
  1182. * @header: pointer to 802.11 header
  1183. * @vdev_id: vdev id
  1184. * @chanfreq: channel frequency
  1185. *
  1186. * Return: none
  1187. */
  1188. void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
  1189. void *header, uint32_t vdev_id, uint32_t chanfreq)
  1190. {
  1191. uint32_t data[CUSTOM_MGMT_CMD_DATA_SIZE];
  1192. data[0] = ((struct wmi_command_header *)header)->type;
  1193. data[1] = ((struct wmi_command_header *)header)->sub_type;
  1194. data[2] = vdev_id;
  1195. data[3] = chanfreq;
  1196. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
  1197. WMI_MGMT_COMMAND_RECORD(wmi_handle, cmd, (uint8_t *)data);
  1198. wmi_specific_cmd_record(wmi_handle, cmd, (uint8_t *)data);
  1199. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
  1200. }
  1201. #else
  1202. /**
1203. * wmi_debugfs_remove() - Remove debugfs entries for wmi logging.
1204. * @wmi_handle: wmi handle
  1207. *
  1208. * Return: none
  1209. */
  1210. static void wmi_debugfs_remove(wmi_unified_t wmi_handle) { }
  1211. void wmi_mgmt_cmd_record(wmi_unified_t wmi_handle, uint32_t cmd,
  1212. void *header, uint32_t vdev_id, uint32_t chanfreq) { }
  1213. static inline void wmi_log_buffer_free(struct wmi_unified *wmi_handle) { }
  1214. static void wmi_minidump_detach(struct wmi_unified *wmi_handle) { }
  1215. static void wmi_minidump_attach(struct wmi_unified *wmi_handle) { }
  1216. #endif /*WMI_INTERFACE_EVENT_LOGGING */
  1217. qdf_export_symbol(wmi_mgmt_cmd_record);
  1218. #ifdef WMI_EXT_DBG
  1219. /**
  1220. * wmi_ext_dbg_msg_enqueue() - enqueue wmi message
1221. * @wmi_handle: wmi handler
1222. * @msg: wmi message to be enqueued
  1223. * Return: size of wmi message queue after enqueue
  1224. */
  1225. static uint32_t wmi_ext_dbg_msg_enqueue(struct wmi_unified *wmi_handle,
  1226. struct wmi_ext_dbg_msg *msg)
  1227. {
  1228. uint32_t list_size;
  1229. qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1230. qdf_list_insert_back_size(&wmi_handle->wmi_ext_dbg_msg_queue,
  1231. &msg->node, &list_size);
  1232. qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1233. return list_size;
  1234. }
  1235. /**
  1236. * wmi_ext_dbg_msg_dequeue() - dequeue wmi message
  1237. * @wmi_handle: wmi handler
  1238. *
  1239. * Return: wmi msg on success else NULL
  1240. */
  1241. static struct wmi_ext_dbg_msg *wmi_ext_dbg_msg_dequeue(struct wmi_unified
  1242. *wmi_handle)
  1243. {
  1244. qdf_list_node_t *list_node = NULL;
  1245. qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1246. qdf_list_remove_front(&wmi_handle->wmi_ext_dbg_msg_queue, &list_node);
  1247. qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1248. if (!list_node)
  1249. return NULL;
  1250. return qdf_container_of(list_node, struct wmi_ext_dbg_msg, node);
  1251. }
  1252. /**
  1253. * wmi_ext_dbg_msg_record() - record wmi messages
  1254. * @wmi_handle: wmi handler
  1255. * @buf: wmi message buffer
  1256. * @len: wmi message length
  1257. * @type: wmi message type
  1258. *
  1259. * Return: QDF_STATUS_SUCCESS on successful recording else failure.
  1260. */
  1261. static QDF_STATUS wmi_ext_dbg_msg_record(struct wmi_unified *wmi_handle,
  1262. uint8_t *buf, uint32_t len,
  1263. enum WMI_MSG_TYPE type)
  1264. {
  1265. struct wmi_ext_dbg_msg *msg;
  1266. uint32_t list_size;
  1267. msg = wmi_ext_dbg_msg_get(len);
  1268. if (!msg)
  1269. return QDF_STATUS_E_NOMEM;
  1270. msg->len = len;
  1271. msg->type = type;
  1272. qdf_mem_copy(msg->buf, buf, len);
  1273. msg->ts = qdf_get_log_timestamp();
  1274. list_size = wmi_ext_dbg_msg_enqueue(wmi_handle, msg);
  1275. if (list_size >= wmi_handle->wmi_ext_dbg_msg_queue_size) {
  1276. msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
  1277. wmi_ext_dbg_msg_put(msg);
  1278. }
  1279. return QDF_STATUS_SUCCESS;
  1280. }
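/*
 * Note (added for clarity, not in the original source): the queue used above
 * is bounded by wmi_ext_dbg_msg_queue_size; once that bound is reached every
 * newly recorded message pushes out the oldest one, so the extended debugfs
 * dump always holds the most recent wmi traffic.
 */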
  1281. /**
  1282. * wmi_ext_dbg_msg_cmd_record() - record wmi command messages
  1283. * @wmi_handle: wmi handler
  1284. * @buf: wmi command buffer
  1285. * @len: wmi command message length
  1286. *
  1287. * Return: QDF_STATUS_SUCCESS on successful recording else failure.
  1288. */
  1289. static QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified *wmi_handle,
  1290. uint8_t *buf, uint32_t len)
  1291. {
  1292. return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
  1293. WMI_MSG_TYPE_CMD);
  1294. }
  1295. /**
  1296. * wmi_ext_dbg_msg_event_record() - record wmi event messages
  1297. * @wmi_handle: wmi handler
  1298. * @buf: wmi event buffer
  1299. * @len: wmi event message length
  1300. *
  1301. * Return: QDF_STATUS_SUCCESS on successful recording else failure.
  1302. */
  1303. static QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified *wmi_handle,
  1304. uint8_t *buf, uint32_t len)
  1305. {
  1306. uint32_t id;
  1307. id = WMI_GET_FIELD(buf, WMI_CMD_HDR, COMMANDID);
  1308. if (id != wmi_handle->wmi_events[wmi_diag_event_id])
  1309. return wmi_ext_dbg_msg_record(wmi_handle, buf, len,
  1310. WMI_MSG_TYPE_EVENT);
  1311. return QDF_STATUS_SUCCESS;
  1312. }
  1313. /**
1314. * wmi_ext_dbg_msg_queue_init() - create wmi message queue and associated lock
  1315. * @wmi_handle: wmi handler
  1316. *
  1317. * Return: none
  1318. */
  1319. static void wmi_ext_dbg_msg_queue_init(struct wmi_unified *wmi_handle)
  1320. {
  1321. qdf_list_create(&wmi_handle->wmi_ext_dbg_msg_queue,
  1322. wmi_handle->wmi_ext_dbg_msg_queue_size);
  1323. qdf_spinlock_create(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1324. }
  1325. /**
1326. * wmi_ext_dbg_msg_queue_deinit() - destroy wmi message queue and associated lock
  1327. * @wmi_handle: wmi handler
  1328. *
  1329. * Return: none
  1330. */
  1331. static void wmi_ext_dbg_msg_queue_deinit(struct wmi_unified *wmi_handle)
  1332. {
  1333. qdf_list_destroy(&wmi_handle->wmi_ext_dbg_msg_queue);
  1334. qdf_spinlock_destroy(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1335. }
  1336. /**
1337. * wmi_ext_dbg_msg_show() - debugfs function to display the whole content of
1338. * wmi command/event messages, including headers.
  1339. * @file: qdf debugfs file handler
  1340. * @arg: pointer to wmi handler
  1341. *
  1342. * Return: QDF_STATUS_SUCCESS if all the messages are shown successfully,
  1343. * else QDF_STATUS_E_AGAIN if more data to show.
  1344. */
  1345. static QDF_STATUS wmi_ext_dbg_msg_show(qdf_debugfs_file_t file, void *arg)
  1346. {
  1347. struct wmi_unified *wmi_handle = (struct wmi_unified *)arg;
  1348. struct wmi_ext_dbg_msg *msg;
  1349. uint64_t secs, usecs;
  1350. msg = wmi_ext_dbg_msg_dequeue(wmi_handle);
  1351. if (!msg)
  1352. return QDF_STATUS_SUCCESS;
  1353. qdf_debugfs_printf(file, "%s: 0x%x\n",
  1354. msg->type == WMI_MSG_TYPE_CMD ? "COMMAND" :
  1355. "EVENT", WMI_GET_FIELD(msg->buf, WMI_CMD_HDR,
  1356. COMMANDID));
  1357. qdf_log_timestamp_to_secs(msg->ts, &secs, &usecs);
  1358. qdf_debugfs_printf(file, "Time: %llu.%llu\n", secs, usecs);
  1359. qdf_debugfs_printf(file, "Length:%d\n", msg->len);
  1360. qdf_debugfs_hexdump(file, msg->buf, msg->len,
  1361. WMI_EXT_DBG_DUMP_ROW_SIZE,
  1362. WMI_EXT_DBG_DUMP_GROUP_SIZE);
  1363. qdf_debugfs_printf(file, "\n");
  1364. if (qdf_debugfs_overflow(file)) {
  1365. qdf_spinlock_acquire(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1366. qdf_list_insert_front(&wmi_handle->wmi_ext_dbg_msg_queue,
  1367. &msg->node);
  1368. qdf_spinlock_release(&wmi_handle->wmi_ext_dbg_msg_queue_lock);
  1369. } else {
  1370. wmi_ext_dbg_msg_put(msg);
  1371. }
  1372. return QDF_STATUS_E_AGAIN;
  1373. }
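/*
 * Note (added for clarity, not in the original source): returning
 * QDF_STATUS_E_AGAIN above asks the qdf debugfs layer to invoke the show
 * handler again, so the queue is drained one message per call; when the seq
 * buffer overflows, the message is re-inserted at the head so it is printed
 * again on the next pass instead of being lost.
 */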
  1374. /**
  1375. * wmi_ext_dbg_msg_write() - debugfs write not supported
  1376. * @priv: private data
  1377. * @buf: received data buffer
  1378. * @len: length of received buffer
  1379. *
  1380. * Return: QDF_STATUS_E_NOSUPPORT.
  1381. */
  1382. static QDF_STATUS wmi_ext_dbg_msg_write(void *priv, const char *buf,
  1383. qdf_size_t len)
  1384. {
  1385. return QDF_STATUS_E_NOSUPPORT;
  1386. }
  1387. static struct qdf_debugfs_fops wmi_ext_dbgfs_ops[WMI_MAX_RADIOS];
  1388. /**
1389. * wmi_ext_dbgfs_init() - init debugfs items for extended wmi dump.
  1390. * @wmi_handle: wmi handler
  1391. * @pdev_idx: pdev index
  1392. *
  1393. * Return: QDF_STATUS_SUCCESS if debugfs is initialized else
  1394. * QDF_STATUS_E_FAILURE
  1395. */
  1396. static QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
  1397. uint32_t pdev_idx)
  1398. {
  1399. qdf_dentry_t dentry;
  1400. char buf[32];
  1401. /* To maintain backward compatibility, naming convention for PDEV 0
  1402. * dentry is kept same as before. For more than 1 PDEV, dentry
  1403. * names will be appended with PDEVx.
  1404. */
  1405. if (wmi_handle->soc->soc_idx == 0 && pdev_idx == 0) {
  1406. dentry = qdf_debugfs_create_dir(WMI_EXT_DBG_DIR, NULL);
  1407. } else {
  1408. snprintf(buf, sizeof(buf), "WMI_EXT_DBG_SOC%u_PDEV%u",
  1409. wmi_handle->soc->soc_idx, pdev_idx);
  1410. dentry = qdf_debugfs_create_dir(buf, NULL);
  1411. }
  1412. if (!dentry) {
  1413. wmi_err("error while creating extended wmi debugfs dir");
  1414. return QDF_STATUS_E_FAILURE;
  1415. }
  1416. wmi_ext_dbgfs_ops[pdev_idx].show = wmi_ext_dbg_msg_show;
  1417. wmi_ext_dbgfs_ops[pdev_idx].write = wmi_ext_dbg_msg_write;
  1418. wmi_ext_dbgfs_ops[pdev_idx].priv = wmi_handle;
  1419. if (!qdf_debugfs_create_file(WMI_EXT_DBG_FILE, WMI_EXT_DBG_FILE_PERM,
  1420. dentry, &wmi_ext_dbgfs_ops[pdev_idx])) {
  1421. qdf_debugfs_remove_dir(dentry);
  1422. wmi_err("Error while creating extended wmi debugfs file");
  1423. return QDF_STATUS_E_FAILURE;
  1424. }
  1425. wmi_handle->wmi_ext_dbg_dentry = dentry;
  1426. wmi_handle->wmi_ext_dbg_msg_queue_size = WMI_EXT_DBG_QUEUE_SIZE;
  1427. wmi_ext_dbg_msg_queue_init(wmi_handle);
  1428. return QDF_STATUS_SUCCESS;
  1429. }
  1430. /**
1431. * wmi_ext_dbgfs_deinit() - cleanup/deinit debugfs items of extended wmi dump.
  1432. * @wmi_handle: wmi handler
  1433. *
  1434. * Return: QDF_STATUS_SUCCESS if cleanup is successful
  1435. */
  1436. static QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
  1437. {
  1438. struct wmi_ext_dbg_msg *msg;
  1439. while ((msg = wmi_ext_dbg_msg_dequeue(wmi_handle)))
  1440. wmi_ext_dbg_msg_put(msg);
  1441. wmi_ext_dbg_msg_queue_deinit(wmi_handle);
  1442. qdf_debugfs_remove_dir_recursive(wmi_handle->wmi_ext_dbg_dentry);
  1443. return QDF_STATUS_SUCCESS;
  1444. }
  1445. #else
  1446. static inline QDF_STATUS wmi_ext_dbg_msg_cmd_record(struct wmi_unified
  1447. *wmi_handle,
  1448. uint8_t *buf, uint32_t len)
  1449. {
  1450. return QDF_STATUS_SUCCESS;
  1451. }
  1452. static inline QDF_STATUS wmi_ext_dbg_msg_event_record(struct wmi_unified
  1453. *wmi_handle,
  1454. uint8_t *buf, uint32_t len)
  1455. {
  1456. return QDF_STATUS_SUCCESS;
  1457. }
  1458. static inline QDF_STATUS wmi_ext_dbgfs_init(struct wmi_unified *wmi_handle,
  1459. uint32_t pdev_idx)
  1460. {
  1461. return QDF_STATUS_SUCCESS;
  1462. }
  1463. static inline QDF_STATUS wmi_ext_dbgfs_deinit(struct wmi_unified *wmi_handle)
  1464. {
  1465. return QDF_STATUS_SUCCESS;
  1466. }
  1467. #endif /*WMI_EXT_DBG */
  1468. int wmi_get_host_credits(wmi_unified_t wmi_handle);
  1469. /* WMI buffer APIs */
  1470. #ifdef NBUF_MEMORY_DEBUG
  1471. wmi_buf_t
  1472. wmi_buf_alloc_debug(wmi_unified_t wmi_handle, uint32_t len,
  1473. const char *func_name,
  1474. uint32_t line_num)
  1475. {
  1476. wmi_buf_t wmi_buf;
  1477. if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
  1478. QDF_ASSERT(0);
  1479. return NULL;
  1480. }
  1481. wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, func_name,
  1482. line_num);
  1483. if (!wmi_buf)
  1484. wmi_buf = qdf_nbuf_alloc_debug(NULL,
  1485. roundup(len + WMI_MIN_HEAD_ROOM,
  1486. 4),
  1487. WMI_MIN_HEAD_ROOM, 4, false,
  1488. func_name, line_num);
  1489. if (!wmi_buf)
  1490. return NULL;
  1491. /* Clear the wmi buffer */
  1492. OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
  1493. /*
  1494. * Set the length of the buffer to match the allocation size.
  1495. */
  1496. qdf_nbuf_set_pktlen(wmi_buf, len);
  1497. return wmi_buf;
  1498. }
  1499. qdf_export_symbol(wmi_buf_alloc_debug);
  1500. void wmi_buf_free(wmi_buf_t net_buf)
  1501. {
  1502. net_buf = wbuff_buff_put(net_buf);
  1503. if (net_buf)
  1504. qdf_nbuf_free(net_buf);
  1505. }
  1506. qdf_export_symbol(wmi_buf_free);
  1507. #else
  1508. wmi_buf_t wmi_buf_alloc_fl(wmi_unified_t wmi_handle, uint32_t len,
  1509. const char *func, uint32_t line)
  1510. {
  1511. wmi_buf_t wmi_buf;
  1512. if (roundup(len + sizeof(WMI_CMD_HDR), 4) > wmi_handle->max_msg_len) {
  1513. QDF_DEBUG_PANIC("Invalid length %u (via %s:%u)",
  1514. len, func, line);
  1515. return NULL;
  1516. }
  1517. wmi_buf = wbuff_buff_get(wmi_handle->wbuff_handle, len, __func__,
  1518. __LINE__);
  1519. if (!wmi_buf)
  1520. wmi_buf = qdf_nbuf_alloc_fl(NULL, roundup(len +
  1521. WMI_MIN_HEAD_ROOM, 4), WMI_MIN_HEAD_ROOM, 4,
  1522. false, func, line);
  1523. if (!wmi_buf) {
  1524. wmi_nofl_err("%s:%d, failed to alloc len:%d", func, line, len);
  1525. return NULL;
  1526. }
  1527. /* Clear the wmi buffer */
  1528. OS_MEMZERO(qdf_nbuf_data(wmi_buf), len);
  1529. /*
  1530. * Set the length of the buffer to match the allocation size.
  1531. */
  1532. qdf_nbuf_set_pktlen(wmi_buf, len);
  1533. return wmi_buf;
  1534. }
  1535. qdf_export_symbol(wmi_buf_alloc_fl);
  1536. void wmi_buf_free(wmi_buf_t net_buf)
  1537. {
  1538. net_buf = wbuff_buff_put(net_buf);
  1539. if (net_buf)
  1540. qdf_nbuf_free(net_buf);
  1541. }
  1542. qdf_export_symbol(wmi_buf_free);
  1543. #endif
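/*
 * Usage sketch (hedged, not part of this file): callers are assumed to go
 * through the wmi_buf_alloc()/wmi_unified_cmd_send() wrappers, which are
 * expected to resolve to the _debug/_fl flavours above depending on
 * NBUF_MEMORY_DEBUG. A typical command path then looks roughly like:
 *
 *	my_cmd_fixed_param *cmd;		 hypothetical command struct
 *	wmi_buf_t buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
 *
 *	if (!buf)
 *		return QDF_STATUS_E_NOMEM;
 *	cmd = (my_cmd_fixed_param *)wmi_buf_data(buf);
 *	... fill cmd ...
 *	if (QDF_IS_STATUS_ERROR(wmi_unified_cmd_send(wmi_handle, buf,
 *						     sizeof(*cmd), cmd_id)))
 *		wmi_buf_free(buf);		 caller frees only on error
 *
 * The length passed to wmi_buf_alloc() excludes WMI_CMD_HDR; the header and
 * WMI_MIN_HEAD_ROOM are accounted for by the allocators above.
 */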
  1544. /**
  1545. * wmi_get_max_msg_len() - get maximum WMI message length
  1546. * @wmi_handle: WMI handle.
  1547. *
  1548. * This function returns the maximum WMI message length
  1549. *
  1550. * Return: maximum WMI message length
  1551. */
  1552. uint16_t wmi_get_max_msg_len(wmi_unified_t wmi_handle)
  1553. {
  1554. return wmi_handle->max_msg_len - WMI_MIN_HEAD_ROOM;
  1555. }
  1556. qdf_export_symbol(wmi_get_max_msg_len);
  1557. #ifndef WMI_CMD_STRINGS
  1558. static uint8_t *wmi_id_to_name(uint32_t wmi_command)
  1559. {
  1560. return "Invalid WMI cmd";
  1561. }
  1562. #endif
  1563. static inline void wmi_log_cmd_id(uint32_t cmd_id, uint32_t tag)
  1564. {
  1565. wmi_debug("Send WMI command:%s command_id:%d htc_tag:%d",
  1566. wmi_id_to_name(cmd_id), cmd_id, tag);
  1567. }
  1568. /**
  1569. * wmi_is_pm_resume_cmd() - check if a cmd is part of the resume sequence
  1570. * @cmd_id: command to check
  1571. *
  1572. * Return: true if the command is part of the resume sequence.
  1573. */
  1574. #ifdef WLAN_POWER_MANAGEMENT_OFFLOAD
  1575. static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
  1576. {
  1577. switch (cmd_id) {
  1578. case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
  1579. case WMI_PDEV_RESUME_CMDID:
  1580. return true;
  1581. default:
  1582. return false;
  1583. }
  1584. }
  1585. #else
  1586. static bool wmi_is_pm_resume_cmd(uint32_t cmd_id)
  1587. {
  1588. return false;
  1589. }
  1590. #endif
  1591. #ifdef FEATURE_WLAN_D0WOW
  1592. static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
  1593. {
  1594. wmi_d0_wow_enable_disable_cmd_fixed_param *cmd;
  1595. if (cmd_id == WMI_D0_WOW_ENABLE_DISABLE_CMDID) {
  1596. cmd = (wmi_d0_wow_enable_disable_cmd_fixed_param *)
  1597. wmi_buf_data(buf);
  1598. if (!cmd->enable)
  1599. return true;
  1600. else
  1601. return false;
  1602. }
  1603. return false;
  1604. }
  1605. #else
  1606. static bool wmi_is_legacy_d0wow_disable_cmd(wmi_buf_t buf, uint32_t cmd_id)
  1607. {
  1608. return false;
  1609. }
  1610. #endif
  1611. #ifdef WMI_INTERFACE_SEQUENCE_CHECK
  1612. static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
  1613. {
  1614. wmi_handle->wmi_sequence = 0;
  1615. wmi_handle->wmi_exp_sequence = 0;
  1616. wmi_handle->wmi_sequence_stop = false;
  1617. }
  1618. static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
  1619. {
  1620. qdf_spinlock_create(&wmi_handle->wmi_seq_lock);
  1621. wmi_interface_sequence_reset(wmi_handle);
  1622. }
  1623. static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
  1624. {
  1625. qdf_spinlock_destroy(&wmi_handle->wmi_seq_lock);
  1626. }
  1627. void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
  1628. {
  1629. wmi_handle->wmi_sequence_stop = true;
  1630. }
  1631. static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
  1632. HTC_PACKET *pkt,
  1633. const char *func, uint32_t line)
  1634. {
  1635. wmi_buf_t buf = GET_HTC_PACKET_NET_BUF_CONTEXT(pkt);
  1636. QDF_STATUS status;
  1637. qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
  1638. status = htc_send_pkt(wmi_handle->htc_handle, pkt);
  1639. if (QDF_STATUS_SUCCESS != status) {
  1640. qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
  1641. qdf_atomic_dec(&wmi_handle->pending_cmds);
  1642. wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
  1643. func, line, status);
  1644. qdf_mem_free(pkt);
  1645. return status;
  1646. }
  1647. /* Record the sequence number in the SKB */
  1648. qdf_nbuf_set_mark(buf, wmi_handle->wmi_sequence);
  1649. /* Increment the sequence number */
  1650. wmi_handle->wmi_sequence = (wmi_handle->wmi_sequence + 1)
  1651. & (wmi_handle->wmi_max_cmds - 1);
  1652. qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
  1653. return status;
  1654. }
  1655. static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
  1656. wmi_buf_t buf)
  1657. {
  1658. /* Skip sequence check when wmi sequence stop is set */
  1659. if (wmi_handle->wmi_sequence_stop)
  1660. return;
  1661. qdf_spin_lock_bh(&wmi_handle->wmi_seq_lock);
  1662. /* Match the completion sequence and expected sequence number */
  1663. if (qdf_nbuf_get_mark(buf) != wmi_handle->wmi_exp_sequence) {
  1664. qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
  1665. wmi_nofl_err("WMI Tx Completion Sequence number mismatch");
  1666. wmi_nofl_err("Expected %d Received %d",
  1667. wmi_handle->wmi_exp_sequence,
  1668. qdf_nbuf_get_mark(buf));
  1669. /* Trigger Recovery */
  1670. qdf_trigger_self_recovery(wmi_handle->soc,
  1671. QDF_WMI_BUF_SEQUENCE_MISMATCH);
  1672. } else {
  1673. /* Increment the expected sequence number */
  1674. wmi_handle->wmi_exp_sequence =
  1675. (wmi_handle->wmi_exp_sequence + 1)
  1676. & (wmi_handle->wmi_max_cmds - 1);
  1677. qdf_spin_unlock_bh(&wmi_handle->wmi_seq_lock);
  1678. }
  1679. }
  1680. #else
  1681. static inline void wmi_interface_sequence_reset(struct wmi_unified *wmi_handle)
  1682. {
  1683. }
  1684. static inline void wmi_interface_sequence_init(struct wmi_unified *wmi_handle)
  1685. {
  1686. }
  1687. static inline void wmi_interface_sequence_deinit(struct wmi_unified *wmi_handle)
  1688. {
  1689. }
  1690. void wmi_interface_sequence_stop(struct wmi_unified *wmi_handle)
  1691. {
  1692. }
  1693. static inline QDF_STATUS wmi_htc_send_pkt(struct wmi_unified *wmi_handle,
  1694. HTC_PACKET *pkt,
  1695. const char *func, uint32_t line)
  1696. {
  1697. QDF_STATUS status;
  1698. status = htc_send_pkt(wmi_handle->htc_handle, pkt);
  1699. if (QDF_STATUS_SUCCESS != status) {
  1700. qdf_atomic_dec(&wmi_handle->pending_cmds);
  1701. wmi_nofl_err("%s:%d, htc_send_pkt failed, status:%d",
  1702. func, line, status);
  1703. qdf_mem_free(pkt);
  1704. return status;
  1705. }
  1706. return status;
  1707. }
  1708. static inline void wmi_interface_sequence_check(struct wmi_unified *wmi_handle,
  1709. wmi_buf_t buf)
  1710. {
  1711. }
  1712. #endif
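/*
 * Note (added for clarity, not in the original source): when
 * WMI_INTERFACE_SEQUENCE_CHECK is enabled, the sequence number is stamped
 * into the nbuf mark at send time and compared against wmi_exp_sequence at
 * tx completion. Both counters wrap with "& (wmi_max_cmds - 1)", which
 * implicitly assumes wmi_max_cmds is a power of two; e.g. with
 * wmi_max_cmds == 1024 the sequence runs 0..1023 and then wraps back to 0.
 */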
  1713. static inline void wmi_unified_debug_dump(wmi_unified_t wmi_handle)
  1714. {
  1715. wmi_nofl_err("Endpoint ID = %d, Tx Queue Depth = %d, soc_id = %u, target type = %s",
  1716. wmi_handle->wmi_endpoint_id,
  1717. htc_get_tx_queue_depth(wmi_handle->htc_handle,
  1718. wmi_handle->wmi_endpoint_id),
  1719. wmi_handle->soc->soc_idx,
  1720. (wmi_handle->target_type ==
  1721. WMI_TLV_TARGET ? "WMI_TLV_TARGET" :
  1722. "WMI_NON_TLV_TARGET"));
  1723. }
  1724. #ifdef SYSTEM_PM_CHECK
  1725. /**
  1726. * wmi_set_system_pm_pkt_tag() - API to set tag for system pm packets
  1727. * @htc_tag: HTC tag
  1728. * @buf: wmi cmd buffer
  1729. * @cmd_id: cmd id
  1730. *
  1731. * Return: None
  1732. */
  1733. static void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
  1734. uint32_t cmd_id)
  1735. {
  1736. switch (cmd_id) {
  1737. case WMI_WOW_ENABLE_CMDID:
  1738. case WMI_PDEV_SUSPEND_CMDID:
  1739. *htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
  1740. break;
  1741. case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
  1742. case WMI_PDEV_RESUME_CMDID:
  1743. *htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
  1744. break;
  1745. case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
  1746. if (wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id))
  1747. *htc_tag = HTC_TX_PACKET_SYSTEM_RESUME;
  1748. else
  1749. *htc_tag = HTC_TX_PACKET_SYSTEM_SUSPEND;
  1750. break;
  1751. default:
  1752. break;
  1753. }
  1754. }
  1755. #else
  1756. static inline void wmi_set_system_pm_pkt_tag(uint16_t *htc_tag, wmi_buf_t buf,
  1757. uint32_t cmd_id)
  1758. {
  1759. }
  1760. #endif
  1761. QDF_STATUS wmi_unified_cmd_send_fl(wmi_unified_t wmi_handle, wmi_buf_t buf,
  1762. uint32_t len, uint32_t cmd_id,
  1763. const char *func, uint32_t line)
  1764. {
  1765. HTC_PACKET *pkt;
  1766. uint16_t htc_tag = 0;
  1767. bool rtpm_inprogress;
  1768. rtpm_inprogress = wmi_get_runtime_pm_inprogress(wmi_handle);
  1769. if (rtpm_inprogress) {
  1770. htc_tag = wmi_handle->ops->wmi_set_htc_tx_tag(wmi_handle, buf,
  1771. cmd_id);
  1772. } else if (qdf_atomic_read(&wmi_handle->is_target_suspended) &&
  1773. !wmi_is_pm_resume_cmd(cmd_id) &&
  1774. !wmi_is_legacy_d0wow_disable_cmd(buf, cmd_id)) {
  1775. wmi_nofl_err("Target is suspended (via %s:%u)",
  1776. func, line);
  1777. return QDF_STATUS_E_BUSY;
  1778. }
  1779. if (wmi_handle->wmi_stopinprogress) {
  1780. wmi_nofl_err("%s:%d, WMI stop in progress, wmi_handle:%pK",
  1781. func, line, wmi_handle);
  1782. return QDF_STATUS_E_INVAL;
  1783. }
  1784. #ifndef WMI_NON_TLV_SUPPORT
  1785. /* Do sanity check on the TLV parameter structure */
  1786. if (wmi_handle->target_type == WMI_TLV_TARGET) {
  1787. void *buf_ptr = (void *)qdf_nbuf_data(buf);
  1788. if (wmi_handle->ops->wmi_check_command_params(NULL, buf_ptr, len, cmd_id)
  1789. != 0) {
  1790. wmi_nofl_err("%s:%d, Invalid WMI Param Buffer for Cmd:%d",
  1791. func, line, cmd_id);
  1792. return QDF_STATUS_E_INVAL;
  1793. }
  1794. }
  1795. #endif
  1796. if (qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
  1797. wmi_nofl_err("%s:%d, Failed to send cmd %x, no memory",
  1798. func, line, cmd_id);
  1799. return QDF_STATUS_E_NOMEM;
  1800. }
  1801. qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
  1802. WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
  1803. qdf_atomic_inc(&wmi_handle->pending_cmds);
  1804. if (qdf_atomic_read(&wmi_handle->pending_cmds) >=
  1805. wmi_handle->wmi_max_cmds) {
  1806. wmi_nofl_err("hostcredits = %d",
  1807. wmi_get_host_credits(wmi_handle));
  1808. htc_dump_counter_info(wmi_handle->htc_handle);
  1809. qdf_atomic_dec(&wmi_handle->pending_cmds);
  1810. wmi_nofl_err("%s:%d, MAX %d WMI Pending cmds reached",
  1811. func, line, wmi_handle->wmi_max_cmds);
  1812. wmi_unified_debug_dump(wmi_handle);
  1813. htc_ce_tasklet_debug_dump(wmi_handle->htc_handle);
  1814. qdf_trigger_self_recovery(wmi_handle->soc->wmi_psoc,
  1815. QDF_WMI_EXCEED_MAX_PENDING_CMDS);
  1816. return QDF_STATUS_E_BUSY;
  1817. }
  1818. pkt = qdf_mem_malloc_fl(sizeof(*pkt), func, line);
  1819. if (!pkt) {
  1820. qdf_atomic_dec(&wmi_handle->pending_cmds);
  1821. return QDF_STATUS_E_NOMEM;
  1822. }
  1823. if (!rtpm_inprogress)
  1824. wmi_set_system_pm_pkt_tag(&htc_tag, buf, cmd_id);
  1825. SET_HTC_PACKET_INFO_TX(pkt,
  1826. NULL,
  1827. qdf_nbuf_data(buf), len + sizeof(WMI_CMD_HDR),
  1828. wmi_handle->wmi_endpoint_id, htc_tag);
  1829. SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);
  1830. wmi_log_cmd_id(cmd_id, htc_tag);
  1831. wmi_ext_dbg_msg_cmd_record(wmi_handle,
  1832. qdf_nbuf_data(buf), qdf_nbuf_len(buf));
  1833. #ifdef WMI_INTERFACE_EVENT_LOGGING
  1834. if (wmi_handle->log_info.wmi_logging_enable) {
  1835. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
  1836. /*
  1837. * Record 16 bytes of WMI cmd data -
  1838. * exclude TLV and WMI headers
  1839. *
  1840. * WMI mgmt command already recorded in wmi_mgmt_cmd_record
  1841. */
  1842. if (wmi_handle->ops->is_management_record(cmd_id) == false) {
  1843. uint8_t *tmpbuf = (uint8_t *)qdf_nbuf_data(buf) +
  1844. wmi_handle->soc->buf_offset_command;
  1845. WMI_COMMAND_RECORD(wmi_handle, cmd_id, tmpbuf);
  1846. wmi_specific_cmd_record(wmi_handle, cmd_id, tmpbuf);
  1847. }
  1848. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
  1849. }
  1850. #endif
  1851. return wmi_htc_send_pkt(wmi_handle, pkt, func, line);
  1852. }
  1853. qdf_export_symbol(wmi_unified_cmd_send_fl);
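/*
 * Note (added for clarity, not in the original source): the send path above
 * (1) rejects commands while the target is suspended or a WMI stop is in
 * progress, (2) optionally sanity-checks the TLV payload, (3) prepends
 * WMI_CMD_HDR and stamps the command id, (4) bounds the number of in-flight
 * commands by wmi_max_cmds, and (5) hands the buffer to HTC through
 * wmi_htc_send_pkt(), which also tags it with the interface sequence number
 * when WMI_INTERFACE_SEQUENCE_CHECK is enabled.
 */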
  1854. /**
  1855. * wmi_unified_get_event_handler_ix() - gives event handler's index
  1856. * @wmi_handle: handle to wmi
  1857. * @event_id: wmi event id
  1858. *
1859. * Return: event handler's index if registered, else -1
  1860. */
  1861. static int wmi_unified_get_event_handler_ix(wmi_unified_t wmi_handle,
  1862. uint32_t event_id)
  1863. {
  1864. uint32_t idx = 0;
  1865. int32_t invalid_idx = -1;
  1866. struct wmi_soc *soc = wmi_handle->soc;
  1867. for (idx = 0; (idx < soc->max_event_idx &&
  1868. idx < WMI_UNIFIED_MAX_EVENT); ++idx) {
  1869. if (wmi_handle->event_id[idx] == event_id &&
  1870. wmi_handle->event_handler[idx]) {
  1871. return idx;
  1872. }
  1873. }
  1874. return invalid_idx;
  1875. }
  1876. /**
  1877. * wmi_register_event_handler_with_ctx() - register event handler with
  1878. * exec ctx and buffer type
  1879. * @wmi_handle: handle to wmi
  1880. * @event_id: wmi event id
  1881. * @handler_func: wmi event handler function
  1882. * @rx_ctx: rx execution context for wmi rx events
1883. * @rx_buf_type: rx buffer type for wmi rx events
  1884. *
  1885. * Return: QDF_STATUS_SUCCESS on successful register event else failure.
  1886. */
  1887. static QDF_STATUS
  1888. wmi_register_event_handler_with_ctx(wmi_unified_t wmi_handle,
  1889. uint32_t event_id,
  1890. wmi_unified_event_handler handler_func,
  1891. enum wmi_rx_exec_ctx rx_ctx,
  1892. enum wmi_rx_buff_type rx_buf_type)
  1893. {
  1894. uint32_t idx = 0;
  1895. uint32_t evt_id;
  1896. struct wmi_soc *soc;
  1897. if (!wmi_handle) {
  1898. wmi_err("WMI handle is NULL");
  1899. return QDF_STATUS_E_FAILURE;
  1900. }
  1901. soc = wmi_handle->soc;
  1902. if (event_id >= wmi_events_max ||
  1903. wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
  1904. QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
  1905. "%s: Event id %d is unavailable",
  1906. __func__, event_id);
  1907. return QDF_STATUS_E_FAILURE;
  1908. }
  1909. evt_id = wmi_handle->wmi_events[event_id];
  1910. if (wmi_unified_get_event_handler_ix(wmi_handle, evt_id) != -1) {
  1911. wmi_info("event handler already registered 0x%x", evt_id);
  1912. return QDF_STATUS_E_FAILURE;
  1913. }
  1914. if (soc->max_event_idx == WMI_UNIFIED_MAX_EVENT) {
  1915. wmi_err("no more event handlers 0x%x",
  1916. evt_id);
  1917. return QDF_STATUS_E_FAILURE;
  1918. }
  1919. QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_DEBUG,
  1920. "Registered event handler for event 0x%8x", evt_id);
  1921. idx = soc->max_event_idx;
  1922. wmi_handle->event_handler[idx] = handler_func;
  1923. wmi_handle->event_id[idx] = evt_id;
  1924. qdf_spin_lock_bh(&soc->ctx_lock);
  1925. wmi_handle->ctx[idx].exec_ctx = rx_ctx;
  1926. wmi_handle->ctx[idx].buff_type = rx_buf_type;
  1927. qdf_spin_unlock_bh(&soc->ctx_lock);
  1928. soc->max_event_idx++;
  1929. return QDF_STATUS_SUCCESS;
  1930. }
  1931. QDF_STATUS
  1932. wmi_unified_register_event(wmi_unified_t wmi_handle,
  1933. uint32_t event_id,
  1934. wmi_unified_event_handler handler_func)
  1935. {
  1936. return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
  1937. handler_func,
  1938. WMI_RX_UMAC_CTX,
  1939. WMI_RX_PROCESSED_BUFF);
  1940. }
  1941. QDF_STATUS
  1942. wmi_unified_register_event_handler(wmi_unified_t wmi_handle,
  1943. wmi_conv_event_id event_id,
  1944. wmi_unified_event_handler handler_func,
  1945. uint8_t rx_ctx)
  1946. {
  1947. return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
  1948. handler_func, rx_ctx,
  1949. WMI_RX_PROCESSED_BUFF);
  1950. }
  1951. qdf_export_symbol(wmi_unified_register_event_handler);
  1952. QDF_STATUS
  1953. wmi_unified_register_raw_event_handler(wmi_unified_t wmi_handle,
  1954. wmi_conv_event_id event_id,
  1955. wmi_unified_event_handler handler_func,
  1956. enum wmi_rx_exec_ctx rx_ctx)
  1957. {
  1958. return wmi_register_event_handler_with_ctx(wmi_handle, event_id,
  1959. handler_func, rx_ctx,
  1960. WMI_RX_RAW_BUFF);
  1961. }
  1962. qdf_export_symbol(wmi_unified_register_raw_event_handler);
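/*
 * Registration sketch (hedged, not part of this file): assuming the usual
 * wmi_unified_event_handler prototype,
 * int handler(ol_scn_t scn, uint8_t *evt_buf, uint32_t len), a component
 * would typically register an event as follows (my_event_id and
 * my_event_handler are hypothetical names):
 *
 *	static int my_event_handler(ol_scn_t scn, uint8_t *evt_buf,
 *				    uint32_t len)
 *	{
 *		parse evt_buf via the unified extract APIs
 *		return 0;
 *	}
 *
 *	wmi_unified_register_event_handler(wmi_handle, my_event_id,
 *					   my_event_handler, WMI_RX_UMAC_CTX);
 */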
  1963. QDF_STATUS wmi_unified_unregister_event(wmi_unified_t wmi_handle,
  1964. uint32_t event_id)
  1965. {
  1966. uint32_t idx = 0;
  1967. uint32_t evt_id;
  1968. struct wmi_soc *soc;
  1969. if (!wmi_handle) {
  1970. wmi_err("WMI handle is NULL");
  1971. return QDF_STATUS_E_FAILURE;
  1972. }
  1973. soc = wmi_handle->soc;
  1974. if (event_id >= wmi_events_max ||
  1975. wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
  1976. QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
  1977. "%s: Event id %d is unavailable",
  1978. __func__, event_id);
  1979. return QDF_STATUS_E_FAILURE;
  1980. }
  1981. evt_id = wmi_handle->wmi_events[event_id];
  1982. idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
  1983. if (idx == -1) {
  1984. wmi_warn("event handler is not registered: evt id 0x%x",
  1985. evt_id);
  1986. return QDF_STATUS_E_FAILURE;
  1987. }
  1988. wmi_handle->event_handler[idx] = NULL;
  1989. wmi_handle->event_id[idx] = 0;
  1990. --soc->max_event_idx;
  1991. wmi_handle->event_handler[idx] =
  1992. wmi_handle->event_handler[soc->max_event_idx];
  1993. wmi_handle->event_id[idx] =
  1994. wmi_handle->event_id[soc->max_event_idx];
  1995. qdf_spin_lock_bh(&soc->ctx_lock);
  1996. wmi_handle->ctx[idx].exec_ctx =
  1997. wmi_handle->ctx[soc->max_event_idx].exec_ctx;
  1998. wmi_handle->ctx[idx].buff_type =
  1999. wmi_handle->ctx[soc->max_event_idx].buff_type;
  2000. qdf_spin_unlock_bh(&soc->ctx_lock);
  2001. return QDF_STATUS_SUCCESS;
  2002. }
  2003. QDF_STATUS wmi_unified_unregister_event_handler(wmi_unified_t wmi_handle,
  2004. wmi_conv_event_id event_id)
  2005. {
  2006. uint32_t idx = 0;
  2007. uint32_t evt_id;
  2008. struct wmi_soc *soc;
  2009. if (!wmi_handle) {
  2010. wmi_err("WMI handle is NULL");
  2011. return QDF_STATUS_E_FAILURE;
  2012. }
  2013. soc = wmi_handle->soc;
  2014. if (event_id >= wmi_events_max ||
  2015. wmi_handle->wmi_events[event_id] == WMI_EVENT_ID_INVALID) {
  2016. wmi_err("Event id %d is unavailable", event_id);
  2017. return QDF_STATUS_E_FAILURE;
  2018. }
  2019. evt_id = wmi_handle->wmi_events[event_id];
  2020. idx = wmi_unified_get_event_handler_ix(wmi_handle, evt_id);
  2021. if (idx == -1) {
  2022. wmi_err("event handler is not registered: evt id 0x%x",
  2023. evt_id);
  2024. return QDF_STATUS_E_FAILURE;
  2025. }
  2026. wmi_handle->event_handler[idx] = NULL;
  2027. wmi_handle->event_id[idx] = 0;
  2028. --soc->max_event_idx;
  2029. wmi_handle->event_handler[idx] =
  2030. wmi_handle->event_handler[soc->max_event_idx];
  2031. wmi_handle->event_id[idx] =
  2032. wmi_handle->event_id[soc->max_event_idx];
  2033. qdf_spin_lock_bh(&soc->ctx_lock);
  2034. wmi_handle->ctx[idx].exec_ctx =
  2035. wmi_handle->ctx[soc->max_event_idx].exec_ctx;
  2036. wmi_handle->ctx[idx].buff_type =
  2037. wmi_handle->ctx[soc->max_event_idx].buff_type;
  2038. qdf_spin_unlock_bh(&soc->ctx_lock);
  2039. return QDF_STATUS_SUCCESS;
  2040. }
  2041. qdf_export_symbol(wmi_unified_unregister_event_handler);
  2042. static void
  2043. wmi_process_rx_diag_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
  2044. void *evt_buf)
  2045. {
  2046. uint32_t num_diag_events_pending;
  2047. qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
  2048. if (RX_DIAG_WQ_MAX_SIZE > 0) {
  2049. num_diag_events_pending = qdf_nbuf_queue_len(
  2050. &wmi_handle->diag_event_queue);
  2051. if (num_diag_events_pending >= RX_DIAG_WQ_MAX_SIZE) {
  2052. qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
  2053. wmi_handle->wmi_rx_diag_events_dropped++;
  2054. wmi_debug_rl("Rx diag events dropped count: %d",
  2055. wmi_handle->wmi_rx_diag_events_dropped);
  2056. qdf_nbuf_free(evt_buf);
  2057. return;
  2058. }
  2059. }
  2060. qdf_nbuf_queue_add(&wmi_handle->diag_event_queue, evt_buf);
  2061. qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
  2062. qdf_queue_work(0, wmi_handle->wmi_rx_diag_work_queue,
  2063. &wmi_handle->rx_diag_event_work);
  2064. }
  2065. void wmi_process_fw_event_worker_thread_ctx(struct wmi_unified *wmi_handle,
  2066. void *evt_buf)
  2067. {
  2068. qdf_spin_lock_bh(&wmi_handle->eventq_lock);
  2069. qdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
  2070. qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
  2071. qdf_queue_work(0, wmi_handle->wmi_rx_work_queue,
  2072. &wmi_handle->rx_event_work);
  2073. return;
  2074. }
  2075. qdf_export_symbol(wmi_process_fw_event_worker_thread_ctx);
  2076. uint32_t wmi_critical_events_in_flight(struct wmi_unified *wmi)
  2077. {
  2078. return qdf_atomic_read(&wmi->critical_events_in_flight);
  2079. }
  2080. static bool
  2081. wmi_is_event_critical(struct wmi_unified *wmi_handle, uint32_t event_id)
  2082. {
  2083. if (wmi_handle->wmi_events[wmi_roam_synch_event_id] == event_id)
  2084. return true;
  2085. return false;
  2086. }
  2087. static QDF_STATUS wmi_discard_fw_event(struct scheduler_msg *msg)
  2088. {
  2089. struct wmi_process_fw_event_params *event_param;
  2090. if (!msg->bodyptr)
  2091. return QDF_STATUS_E_INVAL;
  2092. event_param = (struct wmi_process_fw_event_params *)msg->bodyptr;
  2093. qdf_nbuf_free(event_param->evt_buf);
  2094. qdf_mem_free(msg->bodyptr);
  2095. msg->bodyptr = NULL;
  2096. msg->bodyval = 0;
  2097. msg->type = 0;
  2098. return QDF_STATUS_SUCCESS;
  2099. }
  2100. static QDF_STATUS wmi_process_fw_event_handler(struct scheduler_msg *msg)
  2101. {
  2102. struct wmi_process_fw_event_params *params =
  2103. (struct wmi_process_fw_event_params *)msg->bodyptr;
  2104. struct wmi_unified *wmi_handle;
  2105. uint32_t event_id;
  2106. wmi_handle = (struct wmi_unified *)params->wmi_handle;
  2107. event_id = WMI_GET_FIELD(qdf_nbuf_data(params->evt_buf),
  2108. WMI_CMD_HDR, COMMANDID);
  2109. wmi_process_fw_event(wmi_handle, params->evt_buf);
  2110. if (wmi_is_event_critical(wmi_handle, event_id))
  2111. qdf_atomic_dec(&wmi_handle->critical_events_in_flight);
  2112. qdf_mem_free(msg->bodyptr);
  2113. return QDF_STATUS_SUCCESS;
  2114. }
  2115. /**
  2116. * wmi_process_fw_event_sched_thread_ctx() - common event handler to serialize
  2117. * event processing through scheduler thread
2118. * @wmi: wmi handle
2119. * @ev: event buffer
2120. *
2121. * Return: QDF_STATUS_SUCCESS on success, else a QDF error status
  2123. */
  2124. static QDF_STATUS
  2125. wmi_process_fw_event_sched_thread_ctx(struct wmi_unified *wmi,
  2126. void *ev)
  2127. {
  2128. struct wmi_process_fw_event_params *params_buf;
  2129. struct scheduler_msg msg = { 0 };
  2130. uint32_t event_id;
  2131. params_buf = qdf_mem_malloc(sizeof(struct wmi_process_fw_event_params));
  2132. if (!params_buf) {
  2133. wmi_err("malloc failed");
  2134. qdf_nbuf_free(ev);
  2135. return QDF_STATUS_E_NOMEM;
  2136. }
  2137. params_buf->wmi_handle = wmi;
  2138. params_buf->evt_buf = ev;
  2139. event_id = WMI_GET_FIELD(qdf_nbuf_data(params_buf->evt_buf),
  2140. WMI_CMD_HDR, COMMANDID);
  2141. if (wmi_is_event_critical(wmi, event_id))
  2142. qdf_atomic_inc(&wmi->critical_events_in_flight);
  2143. msg.bodyptr = params_buf;
  2144. msg.bodyval = 0;
  2145. msg.callback = wmi_process_fw_event_handler;
  2146. msg.flush_callback = wmi_discard_fw_event;
  2147. if (QDF_STATUS_SUCCESS !=
  2148. scheduler_post_message(QDF_MODULE_ID_TARGET_IF,
  2149. QDF_MODULE_ID_TARGET_IF,
  2150. QDF_MODULE_ID_TARGET_IF, &msg)) {
  2151. qdf_nbuf_free(ev);
  2152. qdf_mem_free(params_buf);
  2153. return QDF_STATUS_E_FAULT;
  2154. }
  2155. return QDF_STATUS_SUCCESS;
  2156. }
  2157. /**
2158. * wmi_get_pdev_ep() - Get wmi handle based on endpoint
2159. * @soc: handle to wmi soc
2160. * @ep: endpoint id
2161. *
2162. * Return: wmi handle of the pdev mapped to the endpoint, else NULL
  2163. */
  2164. static struct wmi_unified *wmi_get_pdev_ep(struct wmi_soc *soc,
  2165. HTC_ENDPOINT_ID ep)
  2166. {
  2167. uint32_t i;
  2168. for (i = 0; i < WMI_MAX_RADIOS; i++)
  2169. if (soc->wmi_endpoint_id[i] == ep)
  2170. break;
  2171. if (i == WMI_MAX_RADIOS)
  2172. return NULL;
  2173. return soc->wmi_pdev[i];
  2174. }
  2175. /**
2176. * wmi_mtrace_rx() - Wrapper function for qdf_mtrace api
  2177. * @message_id: 32-Bit Wmi message ID
  2178. * @vdev_id: Vdev ID
  2179. * @data: Actual message contents
  2180. *
2181. * This function converts the 32-bit WMI message ID into the 15-bit message ID
2182. * format used by qdf_mtrace, since a qdf_mtrace message reserves only 15
2183. * bits for the message ID.
2184. * Out of these 15 bits, the lower QDF_WMI_MTRACE_CMD_NUM_BITS bits carry the
2185. * WMI command within its group and the remaining upper bits carry the
2186. * WMI_GRP_ID. With this notation there can be a maximum of 256 groups and
2187. * each group can support up to 128 commands.
  2188. *
  2189. * Return: None
  2190. */
  2191. static void wmi_mtrace_rx(uint32_t message_id, uint16_t vdev_id, uint32_t data)
  2192. {
  2193. uint16_t mtrace_message_id;
  2194. mtrace_message_id = QDF_WMI_MTRACE_CMD_ID(message_id) |
  2195. (QDF_WMI_MTRACE_GRP_ID(message_id) <<
  2196. QDF_WMI_MTRACE_CMD_NUM_BITS);
  2197. qdf_mtrace(QDF_MODULE_ID_WMI, QDF_MODULE_ID_WMA,
  2198. mtrace_message_id, vdev_id, data);
  2199. }
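/*
 * Worked example (added for illustration): if QDF_WMI_MTRACE_GRP_ID() yields
 * 0x12 and QDF_WMI_MTRACE_CMD_ID() yields 0x05 for a given message_id, the
 * packed id computed above is 0x05 | (0x12 << QDF_WMI_MTRACE_CMD_NUM_BITS),
 * i.e. 0x905 assuming QDF_WMI_MTRACE_CMD_NUM_BITS == 7.
 */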
  2200. /**
  2201. * wmi_process_control_rx() - process fw events callbacks
  2202. * @wmi_handle: handle to wmi_unified
  2203. * @evt_buf: handle to wmi_buf_t
  2204. *
  2205. * Return: none
  2206. */
  2207. static void wmi_process_control_rx(struct wmi_unified *wmi_handle,
  2208. wmi_buf_t evt_buf)
  2209. {
  2210. struct wmi_soc *soc = wmi_handle->soc;
  2211. uint32_t id;
  2212. uint32_t idx;
  2213. enum wmi_rx_exec_ctx exec_ctx;
  2214. id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
  2215. idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
  2216. if (qdf_unlikely(idx == A_ERROR)) {
  2217. wmi_debug("no handler registered for event id 0x%x", id);
  2218. qdf_nbuf_free(evt_buf);
  2219. return;
  2220. }
  2221. wmi_mtrace_rx(id, 0xFF, idx);
  2222. qdf_spin_lock_bh(&soc->ctx_lock);
  2223. exec_ctx = wmi_handle->ctx[idx].exec_ctx;
  2224. qdf_spin_unlock_bh(&soc->ctx_lock);
  2225. #ifdef WMI_INTERFACE_EVENT_LOGGING
  2226. if (wmi_handle->log_info.wmi_logging_enable) {
  2227. uint8_t *data;
  2228. data = qdf_nbuf_data(evt_buf);
  2229. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
  2230. /* Exclude 4 bytes of TLV header */
  2231. if (wmi_handle->ops->is_diag_event(id)) {
  2232. WMI_DIAG_RX_EVENT_RECORD(wmi_handle, id,
  2233. ((uint8_t *) data +
  2234. wmi_handle->soc->buf_offset_event));
  2235. } else if (wmi_handle->ops->is_management_record(id)) {
  2236. WMI_MGMT_RX_EVENT_RECORD(wmi_handle, id,
  2237. ((uint8_t *) data +
  2238. wmi_handle->soc->buf_offset_event));
  2239. } else {
  2240. WMI_RX_EVENT_RECORD(wmi_handle, id, ((uint8_t *) data +
  2241. wmi_handle->soc->buf_offset_event));
  2242. }
  2243. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
  2244. }
  2245. #endif
  2246. if (exec_ctx == WMI_RX_WORK_CTX) {
  2247. wmi_process_fw_event_worker_thread_ctx(wmi_handle, evt_buf);
  2248. } else if (exec_ctx == WMI_RX_TASKLET_CTX) {
  2249. wmi_process_fw_event(wmi_handle, evt_buf);
  2250. } else if (exec_ctx == WMI_RX_SERIALIZER_CTX) {
  2251. wmi_process_fw_event_sched_thread_ctx(wmi_handle, evt_buf);
  2252. } else if (exec_ctx == WMI_RX_DIAG_WORK_CTX) {
  2253. wmi_process_rx_diag_event_worker_thread_ctx(wmi_handle,
  2254. evt_buf);
  2255. } else {
  2256. wmi_err("Invalid event context %d", exec_ctx);
  2257. qdf_nbuf_free(evt_buf);
  2258. }
  2259. }
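/*
 * Note (added for clarity, not in the original source): the exec_ctx recorded
 * at registration time decides where the event body is processed:
 * WMI_RX_WORK_CTX and WMI_RX_DIAG_WORK_CTX defer to dedicated work queues,
 * WMI_RX_TASKLET_CTX processes the event inline in the HTC completion
 * context, and WMI_RX_SERIALIZER_CTX routes it through the scheduler thread.
 */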
  2260. /**
  2261. * wmi_control_rx() - process fw events callbacks
  2262. * @ctx: handle to wmi
  2263. * @htc_packet: pointer to htc packet
  2264. *
  2265. * Return: none
  2266. */
  2267. static void wmi_control_rx(void *ctx, HTC_PACKET *htc_packet)
  2268. {
  2269. struct wmi_soc *soc = (struct wmi_soc *)ctx;
  2270. struct wmi_unified *wmi_handle;
  2271. wmi_buf_t evt_buf;
  2272. evt_buf = (wmi_buf_t)htc_packet->pPktContext;
  2273. wmi_handle = wmi_get_pdev_ep(soc, htc_packet->Endpoint);
  2274. if (!wmi_handle) {
  2275. wmi_err("unable to get wmi_handle to Endpoint %d",
  2276. htc_packet->Endpoint);
  2277. qdf_nbuf_free(evt_buf);
  2278. return;
  2279. }
  2280. wmi_process_control_rx(wmi_handle, evt_buf);
  2281. }
  2282. #ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
  2283. /**
  2284. * wmi_control_diag_rx() - process diag fw events callbacks
  2285. * @ctx: handle to wmi
  2286. * @htc_packet: pointer to htc packet
  2287. *
  2288. * Return: none
  2289. */
  2290. static void wmi_control_diag_rx(void *ctx, HTC_PACKET *htc_packet)
  2291. {
  2292. struct wmi_soc *soc = (struct wmi_soc *)ctx;
  2293. struct wmi_unified *wmi_handle;
  2294. wmi_buf_t evt_buf;
  2295. evt_buf = (wmi_buf_t)htc_packet->pPktContext;
  2296. wmi_handle = soc->wmi_pdev[0];
  2297. if (!wmi_handle) {
  2298. wmi_err("unable to get wmi_handle for diag event end point id:%d", htc_packet->Endpoint);
  2299. qdf_nbuf_free(evt_buf);
  2300. return;
  2301. }
  2302. wmi_process_control_rx(wmi_handle, evt_buf);
  2303. }
  2304. #endif
  2305. #ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
  2306. QDF_STATUS wmi_unified_cmd_send_over_qmi(struct wmi_unified *wmi_handle,
  2307. wmi_buf_t buf, uint32_t buflen,
  2308. uint32_t cmd_id)
  2309. {
  2310. QDF_STATUS status;
  2311. int32_t ret;
  2312. if (!qdf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR))) {
  2313. wmi_err("Failed to send cmd %x, no memory", cmd_id);
  2314. return QDF_STATUS_E_NOMEM;
  2315. }
  2316. qdf_mem_zero(qdf_nbuf_data(buf), sizeof(WMI_CMD_HDR));
  2317. WMI_SET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);
  2318. wmi_debug("Sending WMI_CMD_ID: 0x%x over qmi", cmd_id);
  2319. status = qdf_wmi_send_recv_qmi(qdf_nbuf_data(buf),
  2320. buflen + sizeof(WMI_CMD_HDR),
  2321. wmi_handle,
  2322. wmi_process_qmi_fw_event);
  2323. if (QDF_IS_STATUS_ERROR(status)) {
  2324. qdf_nbuf_pull_head(buf, sizeof(WMI_CMD_HDR));
  2325. wmi_warn("WMI send on QMI failed. Retrying WMI on HTC");
  2326. } else {
  2327. ret = qdf_atomic_inc_return(&wmi_handle->num_stats_over_qmi);
  2328. wmi_debug("num stats over qmi: %d", ret);
  2329. wmi_buf_free(buf);
  2330. }
  2331. return status;
  2332. }
  2333. static int __wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
  2334. {
  2335. struct wmi_unified *wmi_handle = wmi_cb_ctx;
  2336. wmi_buf_t evt_buf;
  2337. uint32_t evt_id;
  2338. if (!wmi_handle || !buf)
  2339. return -EINVAL;
  2340. evt_buf = wmi_buf_alloc(wmi_handle, len);
  2341. if (!evt_buf)
  2342. return -ENOMEM;
  2343. qdf_mem_copy(qdf_nbuf_data(evt_buf), buf, len);
  2344. evt_id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
  2345. wmi_debug("Received WMI_EVT_ID: %d over qmi", evt_id);
  2346. wmi_process_control_rx(wmi_handle, evt_buf);
  2347. return 0;
  2348. }
  2349. int wmi_process_qmi_fw_event(void *wmi_cb_ctx, void *buf, int len)
  2350. {
  2351. struct qdf_op_sync *op_sync;
  2352. int ret;
  2353. if (qdf_op_protect(&op_sync))
  2354. return -EINVAL;
  2355. ret = __wmi_process_qmi_fw_event(wmi_cb_ctx, buf, len);
  2356. qdf_op_unprotect(op_sync);
  2357. return ret;
  2358. }
  2359. #endif
  2360. /**
  2361. * wmi_process_fw_event() - process any fw event
  2362. * @wmi_handle: wmi handle
  2363. * @evt_buf: fw event buffer
  2364. *
2365. * This function processes the fw event in the caller's context
  2366. *
  2367. * Return: none
  2368. */
  2369. void wmi_process_fw_event(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
  2370. {
  2371. __wmi_control_rx(wmi_handle, evt_buf);
  2372. }
  2373. /**
2374. * __wmi_control_rx() - process serialized wmi event callback
  2375. * @wmi_handle: wmi handle
  2376. * @evt_buf: fw event buffer
  2377. *
  2378. * Return: none
  2379. */
  2380. void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
  2381. {
  2382. uint32_t id;
  2383. uint8_t *data;
  2384. uint32_t len;
  2385. void *wmi_cmd_struct_ptr = NULL;
  2386. #ifndef WMI_NON_TLV_SUPPORT
  2387. int tlv_ok_status = 0;
  2388. #endif
  2389. uint32_t idx = 0;
  2390. struct wmi_raw_event_buffer ev_buf;
  2391. enum wmi_rx_buff_type ev_buff_type;
  2392. id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
  2393. wmi_ext_dbg_msg_event_record(wmi_handle, qdf_nbuf_data(evt_buf),
  2394. qdf_nbuf_len(evt_buf));
  2395. if (qdf_nbuf_pull_head(evt_buf, sizeof(WMI_CMD_HDR)) == NULL)
  2396. goto end;
  2397. data = qdf_nbuf_data(evt_buf);
  2398. len = qdf_nbuf_len(evt_buf);
  2399. #ifndef WMI_NON_TLV_SUPPORT
  2400. if (wmi_handle->target_type == WMI_TLV_TARGET) {
  2401. /* Validate and pad(if necessary) the TLVs */
  2402. tlv_ok_status =
  2403. wmi_handle->ops->wmi_check_and_pad_event(wmi_handle->scn_handle,
  2404. data, len, id,
  2405. &wmi_cmd_struct_ptr);
  2406. if (tlv_ok_status != 0) {
  2407. QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
  2408. "%s: Error: id=0x%x, wmitlv check status=%d",
  2409. __func__, id, tlv_ok_status);
  2410. goto end;
  2411. }
  2412. }
  2413. #endif
  2414. idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
  2415. if (idx == A_ERROR) {
  2416. QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_ERROR,
  2417. "%s : event handler is not registered: event id 0x%x",
  2418. __func__, id);
  2419. goto end;
  2420. }
  2421. #ifdef WMI_INTERFACE_EVENT_LOGGING
  2422. if (wmi_handle->log_info.wmi_logging_enable) {
  2423. qdf_spin_lock_bh(&wmi_handle->log_info.wmi_record_lock);
  2424. /* Exclude 4 bytes of TLV header */
  2425. if (wmi_handle->ops->is_diag_event(id)) {
  2426. /*
  2427. * skip diag event logging in WMI event buffer
  2428. * as its already logged in WMI RX event buffer
  2429. */
  2430. } else if (wmi_handle->ops->is_management_record(id)) {
  2431. /*
  2432. * skip wmi mgmt event logging in WMI event buffer
  2433. * as its already logged in WMI RX event buffer
  2434. */
  2435. } else {
  2436. uint8_t *tmpbuf = (uint8_t *)data +
  2437. wmi_handle->soc->buf_offset_event;
  2438. WMI_EVENT_RECORD(wmi_handle, id, tmpbuf);
  2439. wmi_specific_evt_record(wmi_handle, id, tmpbuf);
  2440. }
  2441. qdf_spin_unlock_bh(&wmi_handle->log_info.wmi_record_lock);
  2442. }
  2443. #endif
  2444. /* Call the WMI registered event handler */
  2445. if (wmi_handle->target_type == WMI_TLV_TARGET) {
  2446. ev_buff_type = wmi_handle->ctx[idx].buff_type;
  2447. if (ev_buff_type == WMI_RX_PROCESSED_BUFF) {
  2448. wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
  2449. wmi_cmd_struct_ptr, len);
  2450. } else if (ev_buff_type == WMI_RX_RAW_BUFF) {
  2451. ev_buf.evt_raw_buf = data;
  2452. ev_buf.evt_processed_buf = wmi_cmd_struct_ptr;
  2453. wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
  2454. (void *)&ev_buf, len);
  2455. }
  2456. }
  2457. else
  2458. wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
  2459. data, len);
  2460. end:
  2461. /* Free event buffer and allocated event tlv */
  2462. #ifndef WMI_NON_TLV_SUPPORT
  2463. if (wmi_handle->target_type == WMI_TLV_TARGET)
  2464. wmi_handle->ops->wmi_free_allocated_event(id, &wmi_cmd_struct_ptr);
  2465. #endif
  2466. qdf_nbuf_free(evt_buf);
  2467. }
  2468. #define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
  2469. static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
  2470. {
  2471. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
  2472. "%s: WLAN_BUG_RCA: Message type %x has exceeded its alloted time of %ds",
  2473. __func__, msg_type_id, WMI_WQ_WD_TIMEOUT / 1000);
  2474. }
  2475. #ifdef CONFIG_SLUB_DEBUG_ON
  2476. static void wmi_workqueue_watchdog_bite(void *arg)
  2477. {
  2478. struct wmi_wq_dbg_info *info = arg;
  2479. wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
  2480. qdf_print_thread_trace(info->task);
  2481. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
  2482. "%s: Going down for WMI WQ Watchdog Bite!", __func__);
  2483. QDF_BUG(0);
  2484. }
  2485. #else
  2486. static inline void wmi_workqueue_watchdog_bite(void *arg)
  2487. {
  2488. struct wmi_wq_dbg_info *info = arg;
  2489. wmi_workqueue_watchdog_warn(info->wd_msg_type_id);
  2490. qdf_print_thread_trace(info->task);
  2491. }
  2492. #endif
  2493. /**
  2494. * wmi_rx_event_work() - process rx event in rx work queue context
  2495. * @arg: opaque pointer to wmi handle
  2496. *
2497. * This function processes fw events to serialize them through the rx worker thread.
  2498. *
  2499. * Return: none
  2500. */
  2501. static void wmi_rx_event_work(void *arg)
  2502. {
  2503. wmi_buf_t buf;
  2504. struct wmi_unified *wmi = arg;
  2505. qdf_timer_t wd_timer;
  2506. struct wmi_wq_dbg_info info;
  2507. /* initialize WMI workqueue watchdog timer */
  2508. qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
  2509. &info, QDF_TIMER_TYPE_SW);
  2510. qdf_spin_lock_bh(&wmi->eventq_lock);
  2511. buf = qdf_nbuf_queue_remove(&wmi->event_queue);
  2512. qdf_spin_unlock_bh(&wmi->eventq_lock);
  2513. while (buf) {
  2514. qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
  2515. info.wd_msg_type_id =
  2516. WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
  2517. info.wmi_wq = wmi->wmi_rx_work_queue;
  2518. info.task = qdf_get_current_task();
  2519. __wmi_control_rx(wmi, buf);
  2520. qdf_timer_stop(&wd_timer);
  2521. qdf_spin_lock_bh(&wmi->eventq_lock);
  2522. buf = qdf_nbuf_queue_remove(&wmi->event_queue);
  2523. qdf_spin_unlock_bh(&wmi->eventq_lock);
  2524. }
  2525. qdf_timer_free(&wd_timer);
  2526. }

/**
 * wmi_rx_diag_event_work() - process rx diag event in work queue context
 * @arg: opaque pointer to wmi handle
 *
 * This function processes fw diag events to serialize them through the rx
 * worker thread.
 *
 * Return: none
 */
static void wmi_rx_diag_event_work(void *arg)
{
	wmi_buf_t buf;
	struct wmi_unified *wmi = arg;
	qdf_timer_t wd_timer;
	struct wmi_wq_dbg_info info;
	uint32_t diag_event_process_count = 0;

	if (!wmi) {
		wmi_err("Invalid WMI handle");
		return;
	}

	/* initialize WMI workqueue watchdog timer */
	qdf_timer_init(NULL, &wd_timer, &wmi_workqueue_watchdog_bite,
			&info, QDF_TIMER_TYPE_SW);

	qdf_spin_lock_bh(&wmi->diag_eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
	qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
	while (buf) {
		qdf_timer_start(&wd_timer, WMI_WQ_WD_TIMEOUT);
		info.wd_msg_type_id =
			WMI_GET_FIELD(qdf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID);
		info.wmi_wq = NULL;
		info.task = qdf_get_current_task();

		__wmi_control_rx(wmi, buf);
		qdf_timer_stop(&wd_timer);

		if (diag_event_process_count++ >
		    RX_DIAG_EVENT_WORK_PROCESS_MAX_COUNT) {
			qdf_queue_work(0, wmi->wmi_rx_diag_work_queue,
					&wmi->rx_diag_event_work);
			break;
		}

		qdf_spin_lock_bh(&wmi->diag_eventq_lock);
		buf = qdf_nbuf_queue_remove(&wmi->diag_event_queue);
		qdf_spin_unlock_bh(&wmi->diag_eventq_lock);
	}
	qdf_timer_free(&wd_timer);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * wmi_runtime_pm_init() - initialize runtime pm wmi variables
 * @wmi_handle: wmi context
 */
static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
{
	qdf_atomic_init(&wmi_handle->runtime_pm_inprogress);
}

/**
 * wmi_set_runtime_pm_inprogress() - set runtime pm progress flag
 * @wmi_handle: wmi context
 * @val: runtime pm progress flag
 */
void wmi_set_runtime_pm_inprogress(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->runtime_pm_inprogress, val);
}

/**
 * wmi_get_runtime_pm_inprogress() - get runtime pm progress flag
 * @wmi_handle: wmi context
 */
inline bool wmi_get_runtime_pm_inprogress(wmi_unified_t wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->runtime_pm_inprogress);
}
#else
static void wmi_runtime_pm_init(struct wmi_unified *wmi_handle)
{
}
#endif

/**
 * wmi_unified_get_soc_handle() - Get WMI SoC handle
 * @wmi_handle: WMI context got from wmi_attach
 *
 * Return: Pointer to SoC handle
 */
void *wmi_unified_get_soc_handle(struct wmi_unified *wmi_handle)
{
	return wmi_handle->soc;
}

/**
 * wmi_interface_logging_init() - Interface logging init
 * @wmi_handle: Pointer to wmi handle object
 * @pdev_idx: pdev index
 *
 * Return: None
 */
#ifdef WMI_INTERFACE_EVENT_LOGGING
static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
			uint32_t pdev_idx)
{
	if (QDF_STATUS_SUCCESS == wmi_log_init(wmi_handle)) {
		qdf_spinlock_create(&wmi_handle->log_info.wmi_record_lock);
		wmi_debugfs_init(wmi_handle, pdev_idx);
	}
}
#else
static inline void wmi_interface_logging_init(struct wmi_unified *wmi_handle,
			uint32_t pdev_idx)
{
}
#endif

static QDF_STATUS wmi_initialize_worker_context(struct wmi_unified *wmi_handle)
{
	wmi_handle->wmi_rx_work_queue =
		qdf_alloc_unbound_workqueue("wmi_rx_event_work_queue");
	if (!wmi_handle->wmi_rx_work_queue) {
		wmi_err("failed to create wmi_rx_event_work_queue");
		return QDF_STATUS_E_RESOURCES;
	}

	qdf_spinlock_create(&wmi_handle->eventq_lock);
	qdf_nbuf_queue_init(&wmi_handle->event_queue);
	qdf_create_work(0, &wmi_handle->rx_event_work,
			wmi_rx_event_work, wmi_handle);

	wmi_handle->wmi_rx_diag_work_queue =
		qdf_alloc_unbound_workqueue("wmi_rx_diag_event_work_queue");
	if (!wmi_handle->wmi_rx_diag_work_queue) {
		wmi_err("failed to create wmi_rx_diag_event_work_queue");
		return QDF_STATUS_E_RESOURCES;
	}
	qdf_spinlock_create(&wmi_handle->diag_eventq_lock);
	qdf_nbuf_queue_init(&wmi_handle->diag_event_queue);
	qdf_create_work(0, &wmi_handle->rx_diag_event_work,
			wmi_rx_diag_event_work, wmi_handle);
	wmi_handle->wmi_rx_diag_events_dropped = 0;

	return QDF_STATUS_SUCCESS;
}

/**
 * wmi_unified_get_pdev_handle() - Get WMI pdev handle
 * @soc: Pointer to wmi soc object
 * @pdev_idx: pdev index
 *
 * Return: Pointer to wmi handle or NULL on failure
 */
void *wmi_unified_get_pdev_handle(struct wmi_soc *soc, uint32_t pdev_idx)
{
	struct wmi_unified *wmi_handle;
	QDF_STATUS status;

	if (pdev_idx >= WMI_MAX_RADIOS)
		return NULL;

	if (!soc->wmi_pdev[pdev_idx]) {
		wmi_handle =
			(struct wmi_unified *)qdf_mem_malloc(
					sizeof(struct wmi_unified));
		if (!wmi_handle)
			return NULL;

		status = wmi_initialize_worker_context(wmi_handle);
		if (QDF_IS_STATUS_ERROR(status))
			goto error;

		wmi_handle->scn_handle = soc->scn_handle;
		wmi_handle->event_id = soc->event_id;
		wmi_handle->event_handler = soc->event_handler;
		wmi_handle->ctx = soc->ctx;
		wmi_handle->ops = soc->ops;
		wmi_handle->wmi_events = soc->wmi_events;
		wmi_handle->services = soc->services;
		wmi_handle->soc = soc;
		wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
		wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
		wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
		wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
		wmi_interface_logging_init(wmi_handle, pdev_idx);
		qdf_atomic_init(&wmi_handle->pending_cmds);
		qdf_atomic_init(&wmi_handle->is_target_suspended);
		wmi_handle->target_type = soc->target_type;
		wmi_handle->wmi_max_cmds = soc->wmi_max_cmds;

		wmi_interface_sequence_init(wmi_handle);

		if (wmi_ext_dbgfs_init(wmi_handle, pdev_idx) !=
		    QDF_STATUS_SUCCESS)
			wmi_err("Failed to initialize wmi extended debugfs");

		soc->wmi_pdev[pdev_idx] = wmi_handle;
	} else {
		wmi_handle = soc->wmi_pdev[pdev_idx];
	}

	wmi_handle->wmi_stopinprogress = 0;
	wmi_handle->wmi_endpoint_id = soc->wmi_endpoint_id[pdev_idx];
	wmi_handle->htc_handle = soc->htc_handle;
	wmi_handle->max_msg_len = soc->max_msg_len[pdev_idx];
	wmi_handle->tag_crash_inject = false;
	wmi_interface_sequence_reset(wmi_handle);

	return wmi_handle;

error:
	qdf_mem_free(wmi_handle);

	return NULL;
}
qdf_export_symbol(wmi_unified_get_pdev_handle);
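
/*
 * Illustrative sketch (not part of the driver): a per-radio init path could
 * fetch, or lazily allocate, a pdev WMI handle as shown below. The names
 * "soc" and "radio_idx" are placeholders for caller-owned objects, not
 * symbols from this file.
 *
 *	struct wmi_unified *pdev_wmi;
 *
 *	pdev_wmi = wmi_unified_get_pdev_handle(soc, radio_idx);
 *	if (!pdev_wmi)
 *		return QDF_STATUS_E_FAILURE;
 */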

static void (*wmi_attach_register[WMI_MAX_TARGET_TYPE])(wmi_unified_t);

void wmi_unified_register_module(enum wmi_target_type target_type,
			void (*wmi_attach)(wmi_unified_t wmi_handle))
{
	if (target_type < WMI_MAX_TARGET_TYPE)
		wmi_attach_register[target_type] = wmi_attach;

	return;
}
qdf_export_symbol(wmi_unified_register_module);
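
/*
 * Illustrative sketch: a target-specific layer is expected to register its
 * attach callback (which fills in wmi_handle->ops) before wmi_unified_attach()
 * runs for that target type. "my_target_wmi_attach" below is a hypothetical
 * callback name, not a function in this codebase.
 *
 *	static void my_target_wmi_attach(wmi_unified_t wmi_handle)
 *	{
 *		// populate wmi_handle->ops for this target type
 *	}
 *
 *	wmi_unified_register_module(WMI_TLV_TARGET, &my_target_wmi_attach);
 */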

/**
 * wmi_wbuff_register() - register wmi with wbuff
 * @wmi_handle: handle to wmi
 *
 * @Return: void
 */
static void wmi_wbuff_register(struct wmi_unified *wmi_handle)
{
	struct wbuff_alloc_request wbuff_alloc[4];

	wbuff_alloc[0].slot = WBUFF_POOL_0;
	wbuff_alloc[0].size = WMI_WBUFF_POOL_0_SIZE;
	wbuff_alloc[1].slot = WBUFF_POOL_1;
	wbuff_alloc[1].size = WMI_WBUFF_POOL_1_SIZE;
	wbuff_alloc[2].slot = WBUFF_POOL_2;
	wbuff_alloc[2].size = WMI_WBUFF_POOL_2_SIZE;
	wbuff_alloc[3].slot = WBUFF_POOL_3;
	wbuff_alloc[3].size = WMI_WBUFF_POOL_3_SIZE;

	wmi_handle->wbuff_handle = wbuff_module_register(wbuff_alloc, 4,
			WMI_MIN_HEAD_ROOM, 4);
}

/**
 * wmi_wbuff_deregister() - deregister wmi with wbuff
 * @wmi_handle: handle to wmi
 *
 * @Return: void
 */
static inline void wmi_wbuff_deregister(struct wmi_unified *wmi_handle)
{
	wbuff_module_deregister(wmi_handle->wbuff_handle);
	wmi_handle->wbuff_handle = NULL;
}

/**
 * wmi_unified_attach() - attach for unified WMI
 * @scn_handle: handle to SCN
 * @param: attach parameters (OS device context, target type, cookie based
 *         allocation enabled/disabled, umac rx callbacks, objmgr psoc, etc.)
 *
 * @Return: wmi handle.
 */
void *wmi_unified_attach(void *scn_handle,
			struct wmi_unified_attach_params *param)
{
	struct wmi_unified *wmi_handle;
	struct wmi_soc *soc;
	QDF_STATUS status;

	soc = (struct wmi_soc *)qdf_mem_malloc(sizeof(struct wmi_soc));
	if (!soc)
		return NULL;

	wmi_handle =
		(struct wmi_unified *)qdf_mem_malloc(
				sizeof(struct wmi_unified));
	if (!wmi_handle) {
		qdf_mem_free(soc);
		return NULL;
	}

	status = wmi_initialize_worker_context(wmi_handle);
	if (QDF_IS_STATUS_ERROR(status))
		goto error;

	wmi_handle->soc = soc;
	wmi_handle->soc->soc_idx = param->soc_id;
	wmi_handle->soc->is_async_ep = param->is_async_ep;
	wmi_handle->event_id = soc->event_id;
	wmi_handle->event_handler = soc->event_handler;
	wmi_handle->ctx = soc->ctx;
	wmi_handle->wmi_events = soc->wmi_events;
	wmi_handle->services = soc->services;
	wmi_handle->scn_handle = scn_handle;
	wmi_handle->cmd_pdev_id_map = soc->cmd_pdev_id_map;
	wmi_handle->evt_pdev_id_map = soc->evt_pdev_id_map;
	wmi_handle->cmd_phy_id_map = soc->cmd_phy_id_map;
	wmi_handle->evt_phy_id_map = soc->evt_phy_id_map;
	soc->scn_handle = scn_handle;
	wmi_handle->target_type = param->target_type;
	soc->target_type = param->target_type;

	if (param->target_type >= WMI_MAX_TARGET_TYPE)
		goto error;

	if (wmi_attach_register[param->target_type]) {
		wmi_attach_register[param->target_type](wmi_handle);
	} else {
		wmi_err("wmi attach is not registered");
		goto error;
	}

	qdf_atomic_init(&wmi_handle->pending_cmds);
	qdf_atomic_init(&wmi_handle->is_target_suspended);
	qdf_atomic_init(&wmi_handle->is_target_suspend_acked);
	qdf_atomic_init(&wmi_handle->num_stats_over_qmi);
	wmi_runtime_pm_init(wmi_handle);
	wmi_interface_logging_init(wmi_handle, WMI_HOST_PDEV_ID_0);

	wmi_interface_sequence_init(wmi_handle);
	/* Assign target cookie capability */
	wmi_handle->use_cookie = param->use_cookie;
	wmi_handle->osdev = param->osdev;
	wmi_handle->wmi_stopinprogress = 0;
	wmi_handle->wmi_max_cmds = param->max_commands;
	soc->wmi_max_cmds = param->max_commands;
	/* Increase the ref count once refcount infra is present */
	soc->wmi_psoc = param->psoc;
	qdf_spinlock_create(&soc->ctx_lock);

	soc->ops = wmi_handle->ops;
	soc->wmi_pdev[0] = wmi_handle;

	if (wmi_ext_dbgfs_init(wmi_handle, 0) != QDF_STATUS_SUCCESS)
		wmi_err("Failed to initialize wmi extended debugfs");

	wmi_wbuff_register(wmi_handle);

	wmi_hang_event_notifier_register(wmi_handle);

	wmi_minidump_attach(wmi_handle);

	return wmi_handle;

error:
	qdf_mem_free(soc);
	qdf_mem_free(wmi_handle);

	return NULL;
}
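
/*
 * Illustrative sketch: a typical attach sequence from a host layer, assuming
 * the target-specific attach module was registered earlier. "scn", "osdev"
 * and "psoc" are placeholders for caller-owned objects, and the literal
 * command count is an arbitrary example value.
 *
 *	struct wmi_unified_attach_params params = {0};
 *	wmi_unified_t wmi_handle;
 *
 *	params.osdev = osdev;
 *	params.target_type = WMI_TLV_TARGET;
 *	params.use_cookie = false;
 *	params.max_commands = 1024;
 *	params.psoc = psoc;
 *	wmi_handle = wmi_unified_attach(scn, &params);
 *	if (!wmi_handle)
 *		return QDF_STATUS_E_NOMEM;
 */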

/**
 * wmi_unified_detach() - detach for unified WMI
 * @wmi_handle: handle to wmi.
 *
 * @Return: none.
 */
void wmi_unified_detach(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;
	struct wmi_soc *soc;
	uint8_t i;

	wmi_minidump_detach(wmi_handle);

	wmi_hang_event_notifier_unregister();

	wmi_wbuff_deregister(wmi_handle);

	soc = wmi_handle->soc;
	for (i = 0; i < WMI_MAX_RADIOS; i++) {
		if (soc->wmi_pdev[i]) {
			qdf_flush_workqueue(0,
				soc->wmi_pdev[i]->wmi_rx_work_queue);
			qdf_destroy_workqueue(0,
				soc->wmi_pdev[i]->wmi_rx_work_queue);
			wmi_debugfs_remove(soc->wmi_pdev[i]);
			buf = qdf_nbuf_queue_remove(
					&soc->wmi_pdev[i]->event_queue);
			while (buf) {
				qdf_nbuf_free(buf);
				buf = qdf_nbuf_queue_remove(
						&soc->wmi_pdev[i]->event_queue);
			}

			qdf_flush_work(&soc->wmi_pdev[i]->rx_diag_event_work);
			buf = qdf_nbuf_queue_remove(
					&soc->wmi_pdev[i]->diag_event_queue);
			while (buf) {
				qdf_nbuf_free(buf);
				buf = qdf_nbuf_queue_remove(
					&soc->wmi_pdev[i]->diag_event_queue);
			}

			wmi_log_buffer_free(soc->wmi_pdev[i]);

			/* Free events logs list */
			if (soc->wmi_pdev[i]->events_logs_list)
				qdf_mem_free(
					soc->wmi_pdev[i]->events_logs_list);

			qdf_spinlock_destroy(&soc->wmi_pdev[i]->eventq_lock);
			qdf_spinlock_destroy(
					&soc->wmi_pdev[i]->diag_eventq_lock);
			wmi_interface_sequence_deinit(soc->wmi_pdev[i]);
			wmi_ext_dbgfs_deinit(soc->wmi_pdev[i]);

			qdf_mem_free(soc->wmi_pdev[i]);
		}
	}

	qdf_spinlock_destroy(&soc->ctx_lock);

	if (soc->wmi_service_bitmap) {
		qdf_mem_free(soc->wmi_service_bitmap);
		soc->wmi_service_bitmap = NULL;
	}

	if (soc->wmi_ext_service_bitmap) {
		qdf_mem_free(soc->wmi_ext_service_bitmap);
		soc->wmi_ext_service_bitmap = NULL;
	}

	if (soc->wmi_ext2_service_bitmap) {
		qdf_mem_free(soc->wmi_ext2_service_bitmap);
		soc->wmi_ext2_service_bitmap = NULL;
	}

	/* Decrease the ref count once refcount infra is present */
	soc->wmi_psoc = NULL;

	qdf_mem_free(soc);
}

/**
 * wmi_unified_remove_work() - detach for WMI work
 * @wmi_handle: handle to WMI
 *
 * A function that does not fully detach WMI, but just removes the work
 * queue items associated with it. This is used to make sure that
 * before any other processing code that may destroy related contexts
 * (HTC, etc), work queue processing on WMI has already been stopped.
 *
 * Return: None
 */
void
wmi_unified_remove_work(struct wmi_unified *wmi_handle)
{
	wmi_buf_t buf;

	qdf_flush_workqueue(0, wmi_handle->wmi_rx_work_queue);
	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	while (buf) {
		qdf_nbuf_free(buf);
		buf = qdf_nbuf_queue_remove(&wmi_handle->event_queue);
	}
	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);

	/* Remove diag events work */
	qdf_flush_workqueue(0, wmi_handle->wmi_rx_diag_work_queue);
	qdf_spin_lock_bh(&wmi_handle->diag_eventq_lock);
	buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
	while (buf) {
		qdf_nbuf_free(buf);
		buf = qdf_nbuf_queue_remove(&wmi_handle->diag_event_queue);
	}
	qdf_spin_unlock_bh(&wmi_handle->diag_eventq_lock);
}
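
/*
 * Illustrative teardown ordering (sketch only), following the description
 * above: stop WMI work queue processing before the related transport
 * contexts go away, and do the full detach afterwards.
 *
 *	wmi_unified_remove_work(wmi_handle);	// flush rx work, drop queued events
 *	// ... tear down HTC / transport contexts ...
 *	wmi_unified_detach(wmi_handle);		// full detach afterwards
 */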

/**
 * wmi_htc_tx_complete() - Process htc tx completion
 * @ctx: handle to wmi soc
 * @htc_pkt: pointer to htc packet
 *
 * @Return: none.
 */
static void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
{
	struct wmi_soc *soc = (struct wmi_soc *)ctx;
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
	u_int8_t *buf_ptr;
	u_int32_t len;
	struct wmi_unified *wmi_handle;
#ifdef WMI_INTERFACE_EVENT_LOGGING
	struct wmi_debug_log_info *log_info;
	uint32_t cmd_id;
	uint8_t *offset_ptr;
	qdf_dma_addr_t dma_addr;
	uint64_t phy_addr;
#endif

	ASSERT(wmi_cmd_buf);
	wmi_handle = wmi_get_pdev_ep(soc, htc_pkt->Endpoint);
	if (!wmi_handle) {
		wmi_err("Unable to get wmi handle");
		QDF_ASSERT(0);
		return;
	}

	buf_ptr = (u_int8_t *)wmi_buf_data(wmi_cmd_buf);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	log_info = &wmi_handle->log_info;

	if (log_info->wmi_logging_enable) {
		cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf),
				WMI_CMD_HDR, COMMANDID);

		dma_addr = QDF_NBUF_CB_PADDR(wmi_cmd_buf);
		phy_addr = qdf_mem_virt_to_phys(qdf_nbuf_data(wmi_cmd_buf));

		qdf_spin_lock_bh(&log_info->wmi_record_lock);
		/* Record 16 bytes of WMI cmd tx complete data
		 * - exclude TLV and WMI headers
		 */
		offset_ptr = buf_ptr + wmi_handle->soc->buf_offset_command;
		if (wmi_handle->ops->is_management_record(cmd_id)) {
			WMI_MGMT_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
					offset_ptr);
		} else {
			WMI_COMMAND_TX_CMP_RECORD(wmi_handle, cmd_id,
					offset_ptr, dma_addr,
					phy_addr);
		}

		qdf_spin_unlock_bh(&log_info->wmi_record_lock);
	}
#endif

	wmi_interface_sequence_check(wmi_handle, wmi_cmd_buf);

	len = qdf_nbuf_len(wmi_cmd_buf);
	qdf_mem_zero(buf_ptr, len);
	wmi_buf_free(wmi_cmd_buf);
	qdf_mem_free(htc_pkt);
	qdf_atomic_dec(&wmi_handle->pending_cmds);
}

#ifdef FEATURE_RUNTIME_PM
/**
 * wmi_htc_log_pkt() - Print information of WMI command from HTC packet
 * @ctx: handle of WMI context
 * @htc_pkt: handle of HTC packet
 *
 * @Return: none
 */
static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
{
	wmi_buf_t wmi_cmd_buf = GET_HTC_PACKET_NET_BUF_CONTEXT(htc_pkt);
	uint32_t cmd_id;

	ASSERT(wmi_cmd_buf);
	cmd_id = WMI_GET_FIELD(qdf_nbuf_data(wmi_cmd_buf), WMI_CMD_HDR,
			COMMANDID);

	wmi_debug("WMI command from HTC packet: %s, ID: %d",
			wmi_id_to_name(cmd_id), cmd_id);
}
#else
static void wmi_htc_log_pkt(void *ctx, HTC_PACKET *htc_pkt)
{
}
#endif

/**
 * wmi_connect_pdev_htc_service() - WMI API to connect to the HTC service
 * @soc: handle to WMI SoC
 * @pdev_idx: Pdev index
 *
 * @Return: QDF_STATUS
 */
static QDF_STATUS wmi_connect_pdev_htc_service(struct wmi_soc *soc,
			uint32_t pdev_idx)
{
	QDF_STATUS status;
	struct htc_service_connect_resp response;
	struct htc_service_connect_req connect;

	OS_MEMZERO(&connect, sizeof(connect));
	OS_MEMZERO(&response, sizeof(response));

	/* meta data is unused for now */
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	/* these fields are the same for all service endpoints */
	connect.EpCallbacks.pContext = soc;
	connect.EpCallbacks.EpTxCompleteMultiple =
		NULL /* Control path completion ar6000_tx_complete */;
	connect.EpCallbacks.EpRecv = wmi_control_rx /* Control path rx */;
	connect.EpCallbacks.EpRecvRefill = NULL /* ar6000_rx_refill */;
	connect.EpCallbacks.EpSendFull = NULL /* ar6000_tx_queue_full */;
	connect.EpCallbacks.EpTxComplete =
		wmi_htc_tx_complete /* ar6000_tx_complete */;
	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;

	/* connect to control service */
	connect.service_id = soc->svc_ids[pdev_idx];
	status = htc_connect_service(soc->htc_handle, &connect, &response);
	if (QDF_IS_STATUS_ERROR(status)) {
		wmi_err("Failed to connect to WMI CONTROL service status:%d",
			status);
		return status;
	}

	if (soc->is_async_ep)
		htc_set_async_ep(soc->htc_handle, response.Endpoint, true);

	soc->wmi_endpoint_id[pdev_idx] = response.Endpoint;
	soc->max_msg_len[pdev_idx] = response.MaxMsgLength;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
wmi_unified_connect_htc_service(struct wmi_unified *wmi_handle,
			HTC_HANDLE htc_handle)
{
	uint32_t i;
	uint8_t wmi_ep_count;

	wmi_handle->soc->htc_handle = htc_handle;

	wmi_ep_count = htc_get_wmi_endpoint_count(htc_handle);
	if (wmi_ep_count > WMI_MAX_RADIOS)
		return QDF_STATUS_E_FAULT;

	for (i = 0; i < wmi_ep_count; i++)
		wmi_connect_pdev_htc_service(wmi_handle->soc, i);

	wmi_handle->htc_handle = htc_handle;
	wmi_handle->wmi_endpoint_id = wmi_handle->soc->wmi_endpoint_id[0];
	wmi_handle->max_msg_len = wmi_handle->soc->max_msg_len[0];

	return QDF_STATUS_SUCCESS;
}
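
/*
 * Illustrative sketch: after attach, the host binds WMI to HTC once the HTC
 * handle is available; per-pdev endpoint IDs and max message lengths are then
 * recorded on the soc. "htc_handle" is a placeholder for the caller's handle.
 *
 *	if (QDF_IS_STATUS_ERROR(wmi_unified_connect_htc_service(wmi_handle,
 *								htc_handle)))
 *		return QDF_STATUS_E_FAILURE;
 */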

#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
QDF_STATUS wmi_diag_connect_pdev_htc_service(struct wmi_unified *wmi_handle,
			HTC_HANDLE htc_handle)
{
	QDF_STATUS status;
	struct htc_service_connect_resp response = {0};
	struct htc_service_connect_req connect = {0};

	/* meta data is unused for now */
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;

	connect.EpCallbacks.pContext = wmi_handle->soc;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = wmi_control_diag_rx /* wmi diag rx */;
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.EpSendFull = NULL;
	connect.EpCallbacks.EpTxComplete = NULL;
	connect.EpCallbacks.ep_log_pkt = wmi_htc_log_pkt;

	/* connect to wmi diag service */
	connect.service_id = WMI_CONTROL_DIAG_SVC;
	status = htc_connect_service(htc_handle, &connect, &response);
	if (QDF_IS_STATUS_ERROR(status)) {
		wmi_err("Failed to connect to WMI DIAG service status:%d",
			status);
		return status;
	}

	if (wmi_handle->soc->is_async_ep)
		htc_set_async_ep(htc_handle, response.Endpoint, true);

	wmi_handle->soc->wmi_diag_endpoint_id = response.Endpoint;

	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * wmi_get_host_credits() - WMI API to get updated host_credits
 * @wmi_handle: handle to WMI.
 *
 * @Return: updated host_credits.
 */
int wmi_get_host_credits(wmi_unified_t wmi_handle)
{
	int host_credits = 0;

	htc_get_control_endpoint_tx_host_credits(wmi_handle->htc_handle,
			&host_credits);
	return host_credits;
}

/**
 * wmi_get_pending_cmds() - WMI API to get WMI Pending Commands in the HTC
 *                          queue
 * @wmi_handle: handle to WMI.
 *
 * @Return: Pending Commands in the HTC queue.
 */
int wmi_get_pending_cmds(wmi_unified_t wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->pending_cmds);
}
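
/*
 * Illustrative sketch: one possible use of the two getters above is
 * flow-control style debugging when a command cannot be sent (log format
 * below is a placeholder, not an existing driver message).
 *
 *	wmi_err("host credits %d, pending cmds %d",
 *		wmi_get_host_credits(wmi_handle),
 *		wmi_get_pending_cmds(wmi_handle));
 */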

/**
 * wmi_set_target_suspend() - WMI API to set target suspend state
 * @wmi_handle: handle to WMI.
 * @val: suspend state boolean.
 *
 * @Return: none.
 */
void wmi_set_target_suspend(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_target_suspended, val);
}

/**
 * wmi_set_target_suspend_acked() - WMI API to set target suspend acked flag
 * @wmi_handle: handle to WMI.
 * @val: target suspend command acked flag.
 *
 * @Return: none.
 */
void wmi_set_target_suspend_acked(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_target_suspend_acked, val);
}

/**
 * wmi_is_target_suspended() - WMI API to check target suspend state
 * @wmi_handle: handle to WMI.
 *
 * WMI API to check target suspend state
 *
 * Return: true if target is suspended, else false.
 */
bool wmi_is_target_suspended(struct wmi_unified *wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->is_target_suspended);
}
qdf_export_symbol(wmi_is_target_suspended);

/**
 * wmi_is_target_suspend_acked() - WMI API to check target suspend command is
 *                                 acked or not
 * @wmi_handle: handle to WMI.
 *
 * WMI API to check whether the target suspend command is acked or not
 *
 * Return: true if target suspend command is acked, else false.
 */
bool wmi_is_target_suspend_acked(struct wmi_unified *wmi_handle)
{
	return qdf_atomic_read(&wmi_handle->is_target_suspend_acked);
}
qdf_export_symbol(wmi_is_target_suspend_acked);
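
/*
 * Illustrative sketch: the suspend flags above are set by the PM/suspend path
 * and consulted before posting new commands; a caller might bail out roughly
 * like this (placeholder logic, not the actual send-path code).
 *
 *	if (wmi_is_target_suspended(wmi_handle)) {
 *		wmi_err("target suspended, not sending command");
 *		return QDF_STATUS_E_BUSY;
 *	}
 */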

#ifdef WLAN_FEATURE_WMI_SEND_RECV_QMI
void wmi_set_qmi_stats(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->is_qmi_stats_enabled = val;
}

bool wmi_is_qmi_stats_enabled(struct wmi_unified *wmi_handle)
{
	return wmi_handle->is_qmi_stats_enabled;
}
#endif

/**
 * wmi_tag_crash_inject() - WMI API to set crash injection state
 * @wmi_handle: handle to WMI.
 * @flag: crash injection state boolean.
 */
void wmi_tag_crash_inject(wmi_unified_t wmi_handle, A_BOOL flag)
{
	wmi_handle->tag_crash_inject = flag;
}

/**
 * wmi_set_is_wow_bus_suspended() - WMI API to set bus suspend state
 * @wmi_handle: handle to WMI.
 * @val: suspend state boolean.
 */
void wmi_set_is_wow_bus_suspended(wmi_unified_t wmi_handle, A_BOOL val)
{
	qdf_atomic_set(&wmi_handle->is_wow_bus_suspended, val);
}

void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
{
	wmi_handle->tgt_force_assert_enable = val;
}

/**
 * wmi_stop() - generic function to block unified WMI command
 * @wmi_handle: handle to WMI.
 *
 * @Return: success always.
 */
int
wmi_stop(wmi_unified_t wmi_handle)
{
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
		  "WMI Stop");
	wmi_handle->wmi_stopinprogress = 1;
	return 0;
}

/**
 * wmi_start() - generic function to allow unified WMI command
 * @wmi_handle: handle to WMI.
 *
 * @Return: success always.
 */
int
wmi_start(wmi_unified_t wmi_handle)
{
	QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
		  "WMI Start");
	wmi_handle->wmi_stopinprogress = 0;
	return 0;
}

/**
 * wmi_is_blocked() - generic function to check if WMI is blocked
 * @wmi_handle: handle to WMI.
 *
 * @Return: true, if blocked, false if not blocked
 */
bool
wmi_is_blocked(wmi_unified_t wmi_handle)
{
	return (!(!wmi_handle->wmi_stopinprogress));
}
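
/*
 * Illustrative sketch: wmi_stop()/wmi_start() bracket phases where command
 * submission must be blocked, and wmi_is_blocked() can be polled in between
 * (sketch only; the recovery step is a placeholder).
 *
 *	wmi_stop(wmi_handle);
 *	// ... quiesce or recover the target; wmi_is_blocked() now returns true
 *	wmi_start(wmi_handle);
 */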

/**
 * wmi_flush_endpoint() - API to flush all the previous packets associated
 *                        with the wmi endpoint
 * @wmi_handle: handle to WMI.
 */
void
wmi_flush_endpoint(wmi_unified_t wmi_handle)
{
	htc_flush_endpoint(wmi_handle->htc_handle,
			wmi_handle->wmi_endpoint_id, 0);
}
qdf_export_symbol(wmi_flush_endpoint);

/**
 * wmi_pdev_id_conversion_enable() - API to enable pdev_id/phy_id conversion
 *                                   in WMI. By default pdev_id conversion is
 *                                   not done in WMI. This API can be used to
 *                                   enable the conversion.
 * @wmi_handle: handle to WMI
 * @pdev_id_map: pointer to pdev_id_map
 * @size: size of pdev_id_map
 *
 * Return: none
 */
void wmi_pdev_id_conversion_enable(wmi_unified_t wmi_handle,
			uint32_t *pdev_id_map,
			uint8_t size)
{
	if (wmi_handle->target_type == WMI_TLV_TARGET)
		wmi_handle->ops->wmi_pdev_id_conversion_enable(wmi_handle,
				pdev_id_map,
				size);
}
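
/*
 * Illustrative sketch: a multi-radio host could enable host<->target pdev id
 * conversion with a small map; the values below are placeholders, not a real
 * target mapping.
 *
 *	uint32_t pdev_id_map[2] = { 1, 2 };
 *
 *	wmi_pdev_id_conversion_enable(wmi_handle, pdev_id_map, 2);
 */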

int __wmi_validate_handle(wmi_unified_t wmi_handle, const char *func)
{
	if (!wmi_handle) {
		wmi_err("Invalid WMI handle (via %s)", func);
		return -EINVAL;
	}

	return 0;
}
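
/*
 * Illustrative sketch: callers pass a function name so the error log
 * identifies the offending call site; passing __func__ directly is one way to
 * do that (a wrapper macro supplying __func__ may exist elsewhere, but is not
 * defined in this file).
 *
 *	if (__wmi_validate_handle(wmi_handle, __func__))
 *		return QDF_STATUS_E_INVAL;
 */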