wmi-tlv.c 134 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630
  1. // SPDX-License-Identifier: ISC
  2. /*
  3. * Copyright (c) 2005-2011 Atheros Communications Inc.
  4. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  5. * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  6. */
  7. #include "core.h"
  8. #include "debug.h"
  9. #include "mac.h"
  10. #include "hw.h"
  11. #include "wmi.h"
  12. #include "wmi-ops.h"
  13. #include "wmi-tlv.h"
  14. #include "p2p.h"
  15. #include "testmode.h"
  16. #include <linux/bitfield.h>
/***************/
/* TLV helpers */
/***************/

/* Per-tag validation policy: the minimum payload length (in bytes) that a
 * TLV carrying the given tag must have before the rest of the driver is
 * allowed to interpret it. A min_len of 0 means "no minimum enforced".
 */
struct wmi_tlv_policy {
	size_t min_len;
};
/* Lookup table consulted by ath10k_wmi_tlv_iter(): for each known tag, the
 * payload must be at least as large as the C structure the event handlers
 * will cast it to. Tags absent from this table are not length-checked here.
 */
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TLV_TAG_ARRAY_BYTE]
		= { .min_len = 0 },
	[WMI_TLV_TAG_ARRAY_UINT32]
		= { .min_len = 0 },
	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
		= { .min_len = sizeof(struct wmi_host_swba_event) },
	[WMI_TLV_TAG_STRUCT_TIM_INFO]
		= { .min_len = sizeof(struct wmi_tim_info) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct hal_reg_capabilities) },
	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
		= { .min_len = sizeof(struct wlan_host_mem_req) },
	[WMI_TLV_TAG_STRUCT_READY_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
	[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
	[WMI_TLV_TAG_STRUCT_ROAM_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_roam_ev) },
	[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
		= { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
	[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
		= { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
};
/* Walk a buffer of WMI TLVs coming from firmware, validating each header
 * and payload length against the bytes remaining and against the per-tag
 * minimum in wmi_tlv_policies, then invoke @iter on every TLV payload.
 *
 * Returns 0 when the whole buffer parses cleanly, -EINVAL on a malformed
 * buffer, or the first non-zero value returned by @iter.
 */
static int
ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
				const void *ptr, void *data),
		    void *data)
{
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;
	int ret;

	while (len > 0) {
		/* need a complete TLV header before reading tag/len */
		if (len < sizeof(*tlv)) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}

		tlv = ptr;
		tlv_tag = __le16_to_cpu(tlv->tag);
		tlv_len = __le16_to_cpu(tlv->len);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		/* claimed payload must fit in what is left of the buffer */
		if (tlv_len > len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}

		/* enforce the per-tag minimum payload length, if one is set */
		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath10k_dbg(ar, ATH10K_DBG_WMI,
				   "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   wmi_tlv_policies[tlv_tag].min_len);
			return -EINVAL;
		}

		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
		if (ret)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}

	return 0;
}
  110. static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
  111. const void *ptr, void *data)
  112. {
  113. const void **tb = data;
  114. if (tag < WMI_TLV_TAG_MAX)
  115. tb[tag] = ptr;
  116. return 0;
  117. }
  118. static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
  119. const void *ptr, size_t len)
  120. {
  121. return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
  122. (void *)tb);
  123. }
  124. static const void **
  125. ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
  126. size_t len, gfp_t gfp)
  127. {
  128. const void **tb;
  129. int ret;
  130. tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
  131. if (!tb)
  132. return ERR_PTR(-ENOMEM);
  133. ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
  134. if (ret) {
  135. kfree(tb);
  136. return ERR_PTR(ret);
  137. }
  138. return tb;
  139. }
  140. static u16 ath10k_wmi_tlv_len(const void *ptr)
  141. {
  142. return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
  143. }
  144. /**************/
  145. /* TLV events */
  146. /**************/
  147. static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
  148. struct sk_buff *skb)
  149. {
  150. const void **tb;
  151. const struct wmi_tlv_bcn_tx_status_ev *ev;
  152. struct ath10k_vif *arvif;
  153. u32 vdev_id, tx_status;
  154. int ret;
  155. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  156. if (IS_ERR(tb)) {
  157. ret = PTR_ERR(tb);
  158. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  159. return ret;
  160. }
  161. ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
  162. if (!ev) {
  163. kfree(tb);
  164. return -EPROTO;
  165. }
  166. tx_status = __le32_to_cpu(ev->tx_status);
  167. vdev_id = __le32_to_cpu(ev->vdev_id);
  168. switch (tx_status) {
  169. case WMI_TLV_BCN_TX_STATUS_OK:
  170. break;
  171. case WMI_TLV_BCN_TX_STATUS_XRETRY:
  172. case WMI_TLV_BCN_TX_STATUS_DROP:
  173. case WMI_TLV_BCN_TX_STATUS_FILTERED:
  174. /* FIXME: It's probably worth telling mac80211 to stop the
  175. * interface as it is crippled.
  176. */
  177. ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
  178. vdev_id, tx_status);
  179. break;
  180. }
  181. arvif = ath10k_get_arvif(ar, vdev_id);
  182. if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
  183. ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
  184. kfree(tb);
  185. return 0;
  186. }
  187. static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
  188. struct sk_buff *skb)
  189. {
  190. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
  191. complete(&ar->vdev_delete_done);
  192. }
  193. static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
  194. const void *ptr, void *data)
  195. {
  196. const struct wmi_tlv_peer_stats_info *stat = ptr;
  197. struct ieee80211_sta *sta;
  198. struct ath10k_sta *arsta;
  199. if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
  200. return -EPROTO;
  201. ath10k_dbg(ar, ATH10K_DBG_WMI,
  202. "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
  203. stat->peer_macaddr.addr,
  204. __le32_to_cpu(stat->last_rx_rate_code),
  205. __le32_to_cpu(stat->last_rx_bitrate_kbps));
  206. ath10k_dbg(ar, ATH10K_DBG_WMI,
  207. "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
  208. __le32_to_cpu(stat->last_tx_rate_code),
  209. __le32_to_cpu(stat->last_tx_bitrate_kbps));
  210. rcu_read_lock();
  211. sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
  212. if (!sta) {
  213. rcu_read_unlock();
  214. ath10k_warn(ar, "not found station for peer stats\n");
  215. return -EINVAL;
  216. }
  217. arsta = (struct ath10k_sta *)sta->drv_priv;
  218. arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
  219. arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
  220. arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
  221. arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
  222. rcu_read_unlock();
  223. return 0;
  224. }
  225. static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
  226. struct sk_buff *skb)
  227. {
  228. const void **tb;
  229. const struct wmi_tlv_peer_stats_info_ev *ev;
  230. const void *data;
  231. u32 num_peer_stats;
  232. int ret;
  233. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  234. if (IS_ERR(tb)) {
  235. ret = PTR_ERR(tb);
  236. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  237. return ret;
  238. }
  239. ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
  240. data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
  241. if (!ev || !data) {
  242. kfree(tb);
  243. return -EPROTO;
  244. }
  245. num_peer_stats = __le32_to_cpu(ev->num_peers);
  246. ath10k_dbg(ar, ATH10K_DBG_WMI,
  247. "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
  248. __le32_to_cpu(ev->vdev_id),
  249. num_peer_stats,
  250. __le32_to_cpu(ev->more_data));
  251. ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
  252. ath10k_wmi_tlv_parse_peer_stats_info, NULL);
  253. if (ret)
  254. ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
  255. kfree(tb);
  256. return 0;
  257. }
  258. static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
  259. struct sk_buff *skb)
  260. {
  261. ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
  262. ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
  263. complete(&ar->peer_stats_info_complete);
  264. }
  265. static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
  266. struct sk_buff *skb)
  267. {
  268. const void **tb;
  269. const struct wmi_tlv_diag_data_ev *ev;
  270. const struct wmi_tlv_diag_item *item;
  271. const void *data;
  272. int ret, num_items, len;
  273. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  274. if (IS_ERR(tb)) {
  275. ret = PTR_ERR(tb);
  276. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  277. return ret;
  278. }
  279. ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
  280. data = tb[WMI_TLV_TAG_ARRAY_BYTE];
  281. if (!ev || !data) {
  282. kfree(tb);
  283. return -EPROTO;
  284. }
  285. num_items = __le32_to_cpu(ev->num_items);
  286. len = ath10k_wmi_tlv_len(data);
  287. while (num_items--) {
  288. if (len == 0)
  289. break;
  290. if (len < sizeof(*item)) {
  291. ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
  292. break;
  293. }
  294. item = data;
  295. if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
  296. ath10k_warn(ar, "failed to parse diag data: item is too long\n");
  297. break;
  298. }
  299. trace_ath10k_wmi_diag_container(ar,
  300. item->type,
  301. __le32_to_cpu(item->timestamp),
  302. __le32_to_cpu(item->code),
  303. __le16_to_cpu(item->len),
  304. item->payload);
  305. len -= sizeof(*item);
  306. len -= roundup(__le16_to_cpu(item->len), 4);
  307. data += sizeof(*item);
  308. data += roundup(__le16_to_cpu(item->len), 4);
  309. }
  310. if (num_items != -1 || len != 0)
  311. ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
  312. num_items, len);
  313. kfree(tb);
  314. return 0;
  315. }
  316. static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
  317. struct sk_buff *skb)
  318. {
  319. const void **tb;
  320. const void *data;
  321. int ret, len;
  322. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  323. if (IS_ERR(tb)) {
  324. ret = PTR_ERR(tb);
  325. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  326. return ret;
  327. }
  328. data = tb[WMI_TLV_TAG_ARRAY_BYTE];
  329. if (!data) {
  330. kfree(tb);
  331. return -EPROTO;
  332. }
  333. len = ath10k_wmi_tlv_len(data);
  334. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
  335. trace_ath10k_wmi_diag(ar, data, len);
  336. kfree(tb);
  337. return 0;
  338. }
  339. static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
  340. struct sk_buff *skb)
  341. {
  342. const void **tb;
  343. const struct wmi_tlv_p2p_noa_ev *ev;
  344. const struct wmi_p2p_noa_info *noa;
  345. int ret, vdev_id;
  346. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  347. if (IS_ERR(tb)) {
  348. ret = PTR_ERR(tb);
  349. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  350. return ret;
  351. }
  352. ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
  353. noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
  354. if (!ev || !noa) {
  355. kfree(tb);
  356. return -EPROTO;
  357. }
  358. vdev_id = __le32_to_cpu(ev->vdev_id);
  359. ath10k_dbg(ar, ATH10K_DBG_WMI,
  360. "wmi tlv p2p noa vdev_id %i descriptors %u\n",
  361. vdev_id, noa->num_descriptors);
  362. ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
  363. kfree(tb);
  364. return 0;
  365. }
  366. static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
  367. struct sk_buff *skb)
  368. {
  369. const void **tb;
  370. const struct wmi_tlv_tx_pause_ev *ev;
  371. int ret, vdev_id;
  372. u32 pause_id, action, vdev_map, peer_id, tid_map;
  373. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  374. if (IS_ERR(tb)) {
  375. ret = PTR_ERR(tb);
  376. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  377. return ret;
  378. }
  379. ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
  380. if (!ev) {
  381. kfree(tb);
  382. return -EPROTO;
  383. }
  384. pause_id = __le32_to_cpu(ev->pause_id);
  385. action = __le32_to_cpu(ev->action);
  386. vdev_map = __le32_to_cpu(ev->vdev_map);
  387. peer_id = __le32_to_cpu(ev->peer_id);
  388. tid_map = __le32_to_cpu(ev->tid_map);
  389. ath10k_dbg(ar, ATH10K_DBG_WMI,
  390. "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
  391. pause_id, action, vdev_map, peer_id, tid_map);
  392. switch (pause_id) {
  393. case WMI_TLV_TX_PAUSE_ID_MCC:
  394. case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
  395. case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
  396. case WMI_TLV_TX_PAUSE_ID_AP_PS:
  397. case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
  398. for (vdev_id = 0; vdev_map; vdev_id++) {
  399. if (!(vdev_map & BIT(vdev_id)))
  400. continue;
  401. vdev_map &= ~BIT(vdev_id);
  402. ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
  403. action);
  404. }
  405. break;
  406. case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
  407. case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
  408. case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
  409. case WMI_TLV_TX_PAUSE_ID_HOST:
  410. ath10k_dbg(ar, ATH10K_DBG_MAC,
  411. "mac ignoring unsupported tx pause id %d\n",
  412. pause_id);
  413. break;
  414. default:
  415. ath10k_dbg(ar, ATH10K_DBG_MAC,
  416. "mac ignoring unknown tx pause vdev %d\n",
  417. pause_id);
  418. break;
  419. }
  420. kfree(tb);
  421. return 0;
  422. }
  423. static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
  424. struct sk_buff *skb)
  425. {
  426. const struct wmi_tlv_rfkill_state_change_ev *ev;
  427. const void **tb;
  428. bool radio;
  429. int ret;
  430. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  431. if (IS_ERR(tb)) {
  432. ret = PTR_ERR(tb);
  433. ath10k_warn(ar,
  434. "failed to parse rfkill state change event: %d\n",
  435. ret);
  436. return;
  437. }
  438. ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
  439. if (!ev) {
  440. kfree(tb);
  441. return;
  442. }
  443. ath10k_dbg(ar, ATH10K_DBG_MAC,
  444. "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
  445. __le32_to_cpu(ev->gpio_pin_num),
  446. __le32_to_cpu(ev->int_type),
  447. __le32_to_cpu(ev->radio_state));
  448. radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
  449. spin_lock_bh(&ar->data_lock);
  450. if (!radio)
  451. ar->hw_rfkill_on = true;
  452. spin_unlock_bh(&ar->data_lock);
  453. /* notify cfg80211 radio state change */
  454. ath10k_mac_rfkill_enable_radio(ar, radio);
  455. wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
  456. }
  457. static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
  458. struct sk_buff *skb)
  459. {
  460. const struct wmi_tlv_pdev_temperature_event *ev;
  461. ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
  462. if (WARN_ON(skb->len < sizeof(*ev)))
  463. return -EPROTO;
  464. ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
  465. return 0;
  466. }
  467. static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
  468. {
  469. struct ieee80211_sta *station;
  470. const struct wmi_tlv_tdls_peer_event *ev;
  471. const void **tb;
  472. struct ath10k_vif *arvif;
  473. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  474. if (IS_ERR(tb)) {
  475. ath10k_warn(ar, "tdls peer failed to parse tlv");
  476. return;
  477. }
  478. ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
  479. if (!ev) {
  480. kfree(tb);
  481. ath10k_warn(ar, "tdls peer NULL event");
  482. return;
  483. }
  484. switch (__le32_to_cpu(ev->peer_reason)) {
  485. case WMI_TDLS_TEARDOWN_REASON_TX:
  486. case WMI_TDLS_TEARDOWN_REASON_RSSI:
  487. case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
  488. rcu_read_lock();
  489. station = ieee80211_find_sta_by_ifaddr(ar->hw,
  490. ev->peer_macaddr.addr,
  491. NULL);
  492. if (!station) {
  493. ath10k_warn(ar, "did not find station from tdls peer event");
  494. goto exit;
  495. }
  496. arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
  497. ieee80211_tdls_oper_request(
  498. arvif->vif, station->addr,
  499. NL80211_TDLS_TEARDOWN,
  500. WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
  501. GFP_ATOMIC
  502. );
  503. break;
  504. default:
  505. kfree(tb);
  506. return;
  507. }
  508. exit:
  509. rcu_read_unlock();
  510. kfree(tb);
  511. }
  512. static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
  513. struct sk_buff *skb)
  514. {
  515. struct wmi_peer_delete_resp_ev_arg *arg;
  516. struct wmi_tlv *tlv_hdr;
  517. tlv_hdr = (struct wmi_tlv *)skb->data;
  518. arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
  519. ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
  520. ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
  521. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
  522. complete(&ar->peer_delete_done);
  523. return 0;
  524. }
  525. /***********/
  526. /* TLV ops */
  527. /***********/
/* Top-level WMI TLV receive dispatcher.
 *
 * Strips the WMI command header, offers the event to testmode first
 * (which may consume it), then routes it to the matching event handler.
 * The skb is freed here unless a case explicitly returns early, in which
 * case ownership has passed to the handler.
 */
static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	/* Drop events too short to even carry the command header. */
	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_TLV_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi tlv testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_TLV_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_TLV_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_TLV_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_TLV_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_TLV_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_TLV_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_TLV_PEER_STATS_INFO_EVENTID:
		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
		break;
	case WMI_TLV_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_TLV_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
		break;
	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_TLV_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_TLV_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_TLV_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_TLV_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_TLV_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_TLV_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_TLV_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		/* skb ownership passes to the handler; no free here. */
		return;
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
		ath10k_wmi_event_service_available(ar, skb);
		break;
	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
		break;
	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
		ath10k_wmi_tlv_event_diag_data(ar, skb);
		break;
	case WMI_TLV_DIAG_EVENTID:
		ath10k_wmi_tlv_event_diag(ar, skb);
		break;
	case WMI_TLV_P2P_NOA_EVENTID:
		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
		break;
	case WMI_TLV_TX_PAUSE_EVENTID:
		ath10k_wmi_tlv_event_tx_pause(ar, skb);
		break;
	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
		break;
	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
		ath10k_wmi_tlv_event_temperature(ar, skb);
		break;
	case WMI_TLV_TDLS_PEER_EVENTID:
		ath10k_wmi_event_tdls_peer(ar, skb);
		break;
	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
  692. static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
  693. struct sk_buff *skb,
  694. struct wmi_scan_ev_arg *arg)
  695. {
  696. const void **tb;
  697. const struct wmi_scan_event *ev;
  698. int ret;
  699. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  700. if (IS_ERR(tb)) {
  701. ret = PTR_ERR(tb);
  702. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  703. return ret;
  704. }
  705. ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
  706. if (!ev) {
  707. kfree(tb);
  708. return -EPROTO;
  709. }
  710. arg->event_type = ev->event_type;
  711. arg->reason = ev->reason;
  712. arg->channel_freq = ev->channel_freq;
  713. arg->scan_req_id = ev->scan_req_id;
  714. arg->scan_id = ev->scan_id;
  715. arg->vdev_id = ev->vdev_id;
  716. kfree(tb);
  717. return 0;
  718. }
  719. static int
  720. ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
  721. struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
  722. {
  723. const void **tb;
  724. const struct wmi_tlv_mgmt_tx_compl_ev *ev;
  725. int ret;
  726. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  727. if (IS_ERR(tb)) {
  728. ret = PTR_ERR(tb);
  729. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  730. return ret;
  731. }
  732. ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
  733. arg->desc_id = ev->desc_id;
  734. arg->status = ev->status;
  735. arg->pdev_id = ev->pdev_id;
  736. arg->ppdu_id = ev->ppdu_id;
  737. if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
  738. arg->ack_rssi = ev->ack_rssi;
  739. kfree(tb);
  740. return 0;
  741. }
/* Scratch state for parsing a management TX bundle completion event.
 * The *_done flags record which of the ordered WMI_TLV_TAG_ARRAY_UINT32
 * arrays have already been latched, since they share one tag.
 */
struct wmi_tlv_tx_bundle_compl_parse {
	const __le32 *num_reports;
	const __le32 *desc_ids;
	const __le32 *status;
	const __le32 *ppdu_ids;
	const __le32 *ack_rssi;
	bool desc_ids_done;
	bool status_done;
	bool ppdu_ids_done;
	bool ack_rssi_done;
};
  753. static int
  754. ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
  755. const void *ptr, void *data)
  756. {
  757. struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
  758. switch (tag) {
  759. case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
  760. bundle_tx_compl->num_reports = ptr;
  761. break;
  762. case WMI_TLV_TAG_ARRAY_UINT32:
  763. if (!bundle_tx_compl->desc_ids_done) {
  764. bundle_tx_compl->desc_ids_done = true;
  765. bundle_tx_compl->desc_ids = ptr;
  766. } else if (!bundle_tx_compl->status_done) {
  767. bundle_tx_compl->status_done = true;
  768. bundle_tx_compl->status = ptr;
  769. } else if (!bundle_tx_compl->ppdu_ids_done) {
  770. bundle_tx_compl->ppdu_ids_done = true;
  771. bundle_tx_compl->ppdu_ids = ptr;
  772. } else if (!bundle_tx_compl->ack_rssi_done) {
  773. bundle_tx_compl->ack_rssi_done = true;
  774. bundle_tx_compl->ack_rssi = ptr;
  775. }
  776. break;
  777. default:
  778. break;
  779. }
  780. return 0;
  781. }
  782. static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
  783. struct ath10k *ar, struct sk_buff *skb,
  784. struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
  785. {
  786. struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
  787. int ret;
  788. ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
  789. ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
  790. &bundle_tx_compl);
  791. if (ret) {
  792. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  793. return ret;
  794. }
  795. if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
  796. !bundle_tx_compl.status)
  797. return -EPROTO;
  798. arg->num_reports = *bundle_tx_compl.num_reports;
  799. arg->desc_ids = bundle_tx_compl.desc_ids;
  800. arg->status = bundle_tx_compl.status;
  801. arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
  802. if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
  803. arg->ack_rssi = bundle_tx_compl.ack_rssi;
  804. return 0;
  805. }
/* Pull a management frame RX event out of the TLV stream.
 *
 * Fills @arg from the RX header TLV (fields stay little-endian) and then
 * reshapes @skb in place so that its data pointer covers exactly the
 * received frame bytes.  Returns 0 on success, -EPROTO on a malformed
 * event, or the TLV parse error.
 */
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_rx_ev *ev;
	const u8 *frame;
	u32 msdu_len;
	int ret, i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
		arg->rssi[i] = ev->rssi[i];

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* Reject events whose claimed frame length runs past the skb. */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame`
	 * (order matters: trim to zero, extend to the frame offset, pull
	 * that offset off the front, then extend by the frame length)
	 */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}
  848. static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
  849. struct sk_buff *skb,
  850. struct wmi_ch_info_ev_arg *arg)
  851. {
  852. const void **tb;
  853. const struct wmi_tlv_chan_info_event *ev;
  854. int ret;
  855. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  856. if (IS_ERR(tb)) {
  857. ret = PTR_ERR(tb);
  858. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  859. return ret;
  860. }
  861. ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
  862. if (!ev) {
  863. kfree(tb);
  864. return -EPROTO;
  865. }
  866. arg->err_code = ev->err_code;
  867. arg->freq = ev->freq;
  868. arg->cmd_flags = ev->cmd_flags;
  869. arg->noise_floor = ev->noise_floor;
  870. arg->rx_clear_count = ev->rx_clear_count;
  871. arg->cycle_count = ev->cycle_count;
  872. if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
  873. ar->running_fw->fw_file.fw_features))
  874. arg->mac_clk_mhz = ev->mac_clk_mhz;
  875. kfree(tb);
  876. return 0;
  877. }
  878. static int
  879. ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
  880. struct wmi_vdev_start_ev_arg *arg)
  881. {
  882. const void **tb;
  883. const struct wmi_vdev_start_response_event *ev;
  884. int ret;
  885. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  886. if (IS_ERR(tb)) {
  887. ret = PTR_ERR(tb);
  888. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  889. return ret;
  890. }
  891. ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
  892. if (!ev) {
  893. kfree(tb);
  894. return -EPROTO;
  895. }
  896. skb_pull(skb, sizeof(*ev));
  897. arg->vdev_id = ev->vdev_id;
  898. arg->req_id = ev->req_id;
  899. arg->resp_type = ev->resp_type;
  900. arg->status = ev->status;
  901. kfree(tb);
  902. return 0;
  903. }
  904. static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
  905. struct sk_buff *skb,
  906. struct wmi_peer_kick_ev_arg *arg)
  907. {
  908. const void **tb;
  909. const struct wmi_peer_sta_kickout_event *ev;
  910. int ret;
  911. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  912. if (IS_ERR(tb)) {
  913. ret = PTR_ERR(tb);
  914. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  915. return ret;
  916. }
  917. ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
  918. if (!ev) {
  919. kfree(tb);
  920. return -EPROTO;
  921. }
  922. arg->mac_addr = ev->peer_macaddr.addr;
  923. kfree(tb);
  924. return 0;
  925. }
/* Scratch state for parsing a host SWBA (software beacon alert) event. */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev;
	bool tim_done;	/* first ARRAY_STRUCT (TIM info list) consumed */
	bool noa_done;	/* second ARRAY_STRUCT (NoA info list) consumed */
	size_t n_tim;	/* number of TIM entries stored in arg */
	size_t n_noa;	/* number of NoA entries stored in arg */
	struct wmi_swba_ev_arg *arg;	/* destination for parsed data */
};
  934. static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
  935. const void *ptr, void *data)
  936. {
  937. struct wmi_tlv_swba_parse *swba = data;
  938. struct wmi_tim_info_arg *tim_info_arg;
  939. const struct wmi_tim_info *tim_info_ev = ptr;
  940. if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
  941. return -EPROTO;
  942. if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
  943. return -ENOBUFS;
  944. if (__le32_to_cpu(tim_info_ev->tim_len) >
  945. sizeof(tim_info_ev->tim_bitmap)) {
  946. ath10k_warn(ar, "refusing to parse invalid swba structure\n");
  947. return -EPROTO;
  948. }
  949. tim_info_arg = &swba->arg->tim_info[swba->n_tim];
  950. tim_info_arg->tim_len = tim_info_ev->tim_len;
  951. tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
  952. tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
  953. tim_info_arg->tim_changed = tim_info_ev->tim_changed;
  954. tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
  955. swba->n_tim++;
  956. return 0;
  957. }
  958. static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
  959. const void *ptr, void *data)
  960. {
  961. struct wmi_tlv_swba_parse *swba = data;
  962. if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
  963. return -EPROTO;
  964. if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
  965. return -ENOBUFS;
  966. swba->arg->noa_info[swba->n_noa++] = ptr;
  967. return 0;
  968. }
  969. static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
  970. const void *ptr, void *data)
  971. {
  972. struct wmi_tlv_swba_parse *swba = data;
  973. int ret;
  974. switch (tag) {
  975. case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
  976. swba->ev = ptr;
  977. break;
  978. case WMI_TLV_TAG_ARRAY_STRUCT:
  979. if (!swba->tim_done) {
  980. swba->tim_done = true;
  981. ret = ath10k_wmi_tlv_iter(ar, ptr, len,
  982. ath10k_wmi_tlv_swba_tim_parse,
  983. swba);
  984. if (ret)
  985. return ret;
  986. } else if (!swba->noa_done) {
  987. swba->noa_done = true;
  988. ret = ath10k_wmi_tlv_iter(ar, ptr, len,
  989. ath10k_wmi_tlv_swba_noa_parse,
  990. swba);
  991. if (ret)
  992. return ret;
  993. }
  994. break;
  995. default:
  996. break;
  997. }
  998. return 0;
  999. }
  1000. static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
  1001. struct sk_buff *skb,
  1002. struct wmi_swba_ev_arg *arg)
  1003. {
  1004. struct wmi_tlv_swba_parse swba = { .arg = arg };
  1005. u32 map;
  1006. size_t n_vdevs;
  1007. int ret;
  1008. ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
  1009. ath10k_wmi_tlv_swba_parse, &swba);
  1010. if (ret) {
  1011. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  1012. return ret;
  1013. }
  1014. if (!swba.ev)
  1015. return -EPROTO;
  1016. arg->vdev_map = swba.ev->vdev_map;
  1017. for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
  1018. if (map & BIT(0))
  1019. n_vdevs++;
  1020. if (n_vdevs != swba.n_tim ||
  1021. n_vdevs != swba.n_noa)
  1022. return -EPROTO;
  1023. return 0;
  1024. }
  1025. static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
  1026. struct sk_buff *skb,
  1027. struct wmi_phyerr_hdr_arg *arg)
  1028. {
  1029. const void **tb;
  1030. const struct wmi_tlv_phyerr_ev *ev;
  1031. const void *phyerrs;
  1032. int ret;
  1033. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  1034. if (IS_ERR(tb)) {
  1035. ret = PTR_ERR(tb);
  1036. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  1037. return ret;
  1038. }
  1039. ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
  1040. phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
  1041. if (!ev || !phyerrs) {
  1042. kfree(tb);
  1043. return -EPROTO;
  1044. }
  1045. arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
  1046. arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
  1047. arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
  1048. arg->buf_len = __le32_to_cpu(ev->buf_len);
  1049. arg->phyerrs = phyerrs;
  1050. kfree(tb);
  1051. return 0;
  1052. }
/* Expected WMI TLV ABI version words, compared against the firmware's
 * service ready event in ath10k_wmi_tlv_op_pull_svc_rdy_ev().
 * NS0/NS1 appear to encode ASCII "QCA_"/"ML" in little-endian — TODO
 * confirm against firmware interface docs.
 */
#define WMI_TLV_ABI_VER_NS0 0x5F414351
#define WMI_TLV_ABI_VER_NS1 0x00004C4D
#define WMI_TLV_ABI_VER_NS2 0x00000000
#define WMI_TLV_ABI_VER_NS3 0x00000000

#define WMI_TLV_ABI_VER0_MAJOR 1
#define WMI_TLV_ABI_VER0_MINOR 0
/* Major version in the top byte, minor in the low 24 bits. */
#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
			  (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
#define WMI_TLV_ABI_VER1 53
  1062. static int
  1063. ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
  1064. const void *ptr, void *data)
  1065. {
  1066. struct wmi_svc_rdy_ev_arg *arg = data;
  1067. int i;
  1068. if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
  1069. return -EPROTO;
  1070. for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
  1071. if (!arg->mem_reqs[i]) {
  1072. arg->mem_reqs[i] = ptr;
  1073. return 0;
  1074. }
  1075. }
  1076. return -ENOMEM;
  1077. }
/* Scratch state for parsing the service ready TLV stream. */
struct wmi_tlv_svc_rdy_parse {
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	bool svc_bmap_done;	/* first ARRAY_UINT32 (service bitmap) seen */
	bool dbs_hw_mode_done;	/* second ARRAY_UINT32 seen (not stored) */
};
  1086. static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
  1087. const void *ptr, void *data)
  1088. {
  1089. struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
  1090. switch (tag) {
  1091. case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
  1092. svc_rdy->ev = ptr;
  1093. break;
  1094. case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
  1095. svc_rdy->reg = ptr;
  1096. break;
  1097. case WMI_TLV_TAG_ARRAY_STRUCT:
  1098. svc_rdy->mem_reqs = ptr;
  1099. break;
  1100. case WMI_TLV_TAG_ARRAY_UINT32:
  1101. if (!svc_rdy->svc_bmap_done) {
  1102. svc_rdy->svc_bmap_done = true;
  1103. svc_rdy->svc_bmap = ptr;
  1104. } else if (!svc_rdy->dbs_hw_mode_done) {
  1105. svc_rdy->dbs_hw_mode_done = true;
  1106. }
  1107. break;
  1108. default:
  1109. break;
  1110. }
  1111. return 0;
  1112. }
/* Pull the TLV service ready event into @arg.
 *
 * Collects the event struct, HAL register capabilities, service bitmap
 * and host memory requests from the TLV stream, verifies the WMI TLV ABI
 * version words against the values this driver expects, and copies the
 * capability fields (still little-endian) into @arg.
 *
 * Returns 0 on success, -EPROTO if mandatory TLVs are missing,
 * -ENOTSUPP on an ABI mismatch, or a TLV parse error.
 */
static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_svc_rdy_ev_arg *arg)
{
	const struct hal_reg_capabilities *reg;
	const struct wmi_tlv_svc_rdy_ev *ev;
	const __le32 *svc_bmap;
	const struct wlan_host_mem_req *mem_reqs;
	struct wmi_tlv_svc_rdy_parse svc_rdy = { };
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = svc_rdy.ev;
	reg = svc_rdy.reg;
	svc_bmap = svc_rdy.svc_bmap;
	mem_reqs = svc_rdy.mem_reqs;

	if (!ev || !reg || !svc_bmap || !mem_reqs)
		return -EPROTO;

	/* This is an internal ABI compatibility check for WMI TLV so check it
	 * here instead of the generic WMI code.
	 */
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);

	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
		return -ENOTSUPP;
	}

	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->vht_supp_mcs = ev->vht_supp_mcs;
	arg->sw_ver0 = ev->abi.abi_ver0;
	arg->sw_ver1 = ev->abi.abi_ver1;
	arg->fw_build = ev->fw_build_vers;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = reg->eeprom_rd;
	arg->low_2ghz_chan = reg->low_2ghz_chan;
	arg->high_2ghz_chan = reg->high_2ghz_chan;
	arg->low_5ghz_chan = reg->low_5ghz_chan;
	arg->high_5ghz_chan = reg->high_5ghz_chan;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = svc_bmap;
	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
	arg->sys_cap_info = ev->sys_cap_info;

	/* The mem_reqs TLV is itself an array of structs; walk it. */
	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
				  ath10k_wmi_tlv_parse_mem_reqs, arg);
	if (ret) {
		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
		return ret;
	}

	return 0;
}
  1179. static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
  1180. struct sk_buff *skb,
  1181. struct wmi_rdy_ev_arg *arg)
  1182. {
  1183. const void **tb;
  1184. const struct wmi_tlv_rdy_ev *ev;
  1185. int ret;
  1186. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  1187. if (IS_ERR(tb)) {
  1188. ret = PTR_ERR(tb);
  1189. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  1190. return ret;
  1191. }
  1192. ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
  1193. if (!ev) {
  1194. kfree(tb);
  1195. return -EPROTO;
  1196. }
  1197. arg->sw_version = ev->abi.abi_ver0;
  1198. arg->abi_version = ev->abi.abi_ver1;
  1199. arg->status = ev->status;
  1200. arg->mac_addr = ev->mac_addr.addr;
  1201. kfree(tb);
  1202. return 0;
  1203. }
  1204. static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
  1205. const void *ptr, void *data)
  1206. {
  1207. struct wmi_svc_avail_ev_arg *arg = data;
  1208. switch (tag) {
  1209. case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
  1210. arg->service_map_ext_valid = true;
  1211. arg->service_map_ext_len = *(__le32 *)ptr;
  1212. arg->service_map_ext = ptr + sizeof(__le32);
  1213. return 0;
  1214. default:
  1215. break;
  1216. }
  1217. return 0;
  1218. }
  1219. static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
  1220. struct sk_buff *skb,
  1221. struct wmi_svc_avail_ev_arg *arg)
  1222. {
  1223. int ret;
  1224. ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
  1225. ath10k_wmi_tlv_svc_avail_parse, arg);
  1226. if (ret) {
  1227. ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
  1228. return ret;
  1229. }
  1230. return 0;
  1231. }
  1232. static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
  1233. struct ath10k_fw_stats_vdev *dst)
  1234. {
  1235. int i;
  1236. dst->vdev_id = __le32_to_cpu(src->vdev_id);
  1237. dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
  1238. dst->data_snr = __le32_to_cpu(src->data_snr);
  1239. dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
  1240. dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
  1241. dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
  1242. dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
  1243. dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
  1244. dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
  1245. for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
  1246. dst->num_tx_frames[i] =
  1247. __le32_to_cpu(src->num_tx_frames[i]);
  1248. for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
  1249. dst->num_tx_frames_retries[i] =
  1250. __le32_to_cpu(src->num_tx_frames_retries[i]);
  1251. for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
  1252. dst->num_tx_frames_failures[i] =
  1253. __le32_to_cpu(src->num_tx_frames_failures[i]);
  1254. for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
  1255. dst->tx_rate_history[i] =
  1256. __le32_to_cpu(src->tx_rate_history[i]);
  1257. for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
  1258. dst->beacon_rssi_history[i] =
  1259. __le32_to_cpu(src->beacon_rssi_history[i]);
  1260. }
/* Parse a WMI-TLV stats update event into @stats.
 *
 * The event consists of a fixed header (wmi_tlv_stats_ev) plus a byte
 * array that packs, in order: num_pdev_stats pdev records,
 * num_vdev_stats vdev records and num_peer_stats peer records.  When
 * WMI_TLV_STAT_PEER_EXTD is set in stats_id, one extended peer record
 * per peer follows the regular peer records.
 *
 * Returns 0 on success, -EPROTO if the payload is malformed/truncated,
 * or the error from TLV parsing.  Successfully parsed records are
 * appended to the lists in @stats; a failed per-record allocation only
 * skips that record.
 */
static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
					   struct sk_buff *skb,
					   struct ath10k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_tlv_stats_ev *ev;
	u32 num_peer_stats_extd;
	const void *data;
	u32 num_pdev_stats;
	u32 num_vdev_stats;
	u32 num_peer_stats;
	u32 num_bcnflt_stats;
	u32 num_chan_stats;
	size_t data_len;
	u32 stats_id;
	int ret;
	int i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	/* data_len tracks the bytes remaining in the packed stats array and
	 * is decremented as records are consumed below.
	 */
	data_len = ath10k_wmi_tlv_len(data);
	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
	stats_id = __le32_to_cpu(ev->stats_id);
	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
		   num_pdev_stats, num_vdev_stats, num_peer_stats,
		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);

	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = data;
		/* Bail out if firmware advertised more records than the
		 * byte array actually carries.
		 */
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < num_vdev_stats; i++) {
		const struct wmi_tlv_vdev_stats *src;
		struct ath10k_fw_stats_vdev *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);

		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
			const struct wmi_tlv_peer_stats_extd *extd;
			unsigned long rx_duration_high;

			/* The i-th extended record sits after the remaining
			 * (num_peer_stats - i - 1) regular peer records and
			 * the i preceding extended records.
			 * NOTE(review): this read is not bounded by
			 * data_len — assumes firmware always supplies the
			 * extended records when the flag is set; confirm.
			 */
			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
			       + sizeof(*extd) * i;

			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
			rx_duration_high = __le32_to_cpu
						(extd->rx_duration_high);

			/* Upper rx-duration bits are only valid when the
			 * firmware sets the valid bit in the same word.
			 */
			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
				     &rx_duration_high)) {
				rx_duration_high =
					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
						  rx_duration_high);
				dst->rx_duration |= (u64)rx_duration_high <<
						    WMI_TLV_PEER_RX_DURATION_SHIFT;
			}
		}

		list_add_tail(&dst->list, &stats->peers);
	}

	kfree(tb);
	return 0;
}
  1373. static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
  1374. struct sk_buff *skb,
  1375. struct wmi_roam_ev_arg *arg)
  1376. {
  1377. const void **tb;
  1378. const struct wmi_tlv_roam_ev *ev;
  1379. int ret;
  1380. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  1381. if (IS_ERR(tb)) {
  1382. ret = PTR_ERR(tb);
  1383. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  1384. return ret;
  1385. }
  1386. ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
  1387. if (!ev) {
  1388. kfree(tb);
  1389. return -EPROTO;
  1390. }
  1391. arg->vdev_id = ev->vdev_id;
  1392. arg->reason = ev->reason;
  1393. arg->rssi = ev->rssi;
  1394. kfree(tb);
  1395. return 0;
  1396. }
  1397. static int
  1398. ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
  1399. struct wmi_wow_ev_arg *arg)
  1400. {
  1401. const void **tb;
  1402. const struct wmi_tlv_wow_event_info *ev;
  1403. int ret;
  1404. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  1405. if (IS_ERR(tb)) {
  1406. ret = PTR_ERR(tb);
  1407. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  1408. return ret;
  1409. }
  1410. ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
  1411. if (!ev) {
  1412. kfree(tb);
  1413. return -EPROTO;
  1414. }
  1415. arg->vdev_id = __le32_to_cpu(ev->vdev_id);
  1416. arg->flag = __le32_to_cpu(ev->flag);
  1417. arg->wake_reason = __le32_to_cpu(ev->wake_reason);
  1418. arg->data_len = __le32_to_cpu(ev->data_len);
  1419. kfree(tb);
  1420. return 0;
  1421. }
  1422. static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
  1423. struct sk_buff *skb,
  1424. struct wmi_echo_ev_arg *arg)
  1425. {
  1426. const void **tb;
  1427. const struct wmi_echo_event *ev;
  1428. int ret;
  1429. tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
  1430. if (IS_ERR(tb)) {
  1431. ret = PTR_ERR(tb);
  1432. ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
  1433. return ret;
  1434. }
  1435. ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
  1436. if (!ev) {
  1437. kfree(tb);
  1438. return -EPROTO;
  1439. }
  1440. arg->value = ev->value;
  1441. kfree(tb);
  1442. return 0;
  1443. }
  1444. static struct sk_buff *
  1445. ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
  1446. {
  1447. struct wmi_tlv_pdev_suspend *cmd;
  1448. struct wmi_tlv *tlv;
  1449. struct sk_buff *skb;
  1450. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1451. if (!skb)
  1452. return ERR_PTR(-ENOMEM);
  1453. tlv = (void *)skb->data;
  1454. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
  1455. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1456. cmd = (void *)tlv->value;
  1457. cmd->opt = __cpu_to_le32(opt);
  1458. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
  1459. return skb;
  1460. }
  1461. static struct sk_buff *
  1462. ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
  1463. {
  1464. struct wmi_tlv_resume_cmd *cmd;
  1465. struct wmi_tlv *tlv;
  1466. struct sk_buff *skb;
  1467. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1468. if (!skb)
  1469. return ERR_PTR(-ENOMEM);
  1470. tlv = (void *)skb->data;
  1471. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
  1472. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1473. cmd = (void *)tlv->value;
  1474. cmd->reserved = __cpu_to_le32(0);
  1475. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
  1476. return skb;
  1477. }
  1478. static struct sk_buff *
  1479. ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
  1480. u16 rd, u16 rd2g, u16 rd5g,
  1481. u16 ctl2g, u16 ctl5g,
  1482. enum wmi_dfs_region dfs_reg)
  1483. {
  1484. struct wmi_tlv_pdev_set_rd_cmd *cmd;
  1485. struct wmi_tlv *tlv;
  1486. struct sk_buff *skb;
  1487. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1488. if (!skb)
  1489. return ERR_PTR(-ENOMEM);
  1490. tlv = (void *)skb->data;
  1491. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
  1492. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1493. cmd = (void *)tlv->value;
  1494. cmd->regd = __cpu_to_le32(rd);
  1495. cmd->regd_2ghz = __cpu_to_le32(rd2g);
  1496. cmd->regd_5ghz = __cpu_to_le32(rd5g);
  1497. cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
  1498. cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
  1499. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
  1500. return skb;
  1501. }
/* Report when TxBF should be configured for WMI-TLV firmware: after the
 * peer association has completed.
 */
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
	return WMI_TXBF_CONF_AFTER_ASSOC;
}
  1506. static struct sk_buff *
  1507. ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
  1508. u32 param_value)
  1509. {
  1510. struct wmi_tlv_pdev_set_param_cmd *cmd;
  1511. struct wmi_tlv *tlv;
  1512. struct sk_buff *skb;
  1513. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1514. if (!skb)
  1515. return ERR_PTR(-ENOMEM);
  1516. tlv = (void *)skb->data;
  1517. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
  1518. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1519. cmd = (void *)tlv->value;
  1520. cmd->param_id = __cpu_to_le32(param_id);
  1521. cmd->param_value = __cpu_to_le32(param_value);
  1522. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
  1523. param_id, param_value);
  1524. return skb;
  1525. }
  1526. static void
  1527. ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
  1528. {
  1529. struct host_memory_chunk_tlv *chunk;
  1530. struct wmi_tlv *tlv;
  1531. dma_addr_t paddr;
  1532. int i;
  1533. __le16 tlv_len, tlv_tag;
  1534. tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
  1535. tlv_len = __cpu_to_le16(sizeof(*chunk));
  1536. for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
  1537. tlv = host_mem_chunks;
  1538. tlv->tag = tlv_tag;
  1539. tlv->len = tlv_len;
  1540. chunk = (void *)tlv->value;
  1541. chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
  1542. chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
  1543. chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
  1544. if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
  1545. ar->wmi.svc_map)) {
  1546. paddr = ar->wmi.mem_chunks[i].paddr;
  1547. chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
  1548. }
  1549. ath10k_dbg(ar, ATH10K_DBG_WMI,
  1550. "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
  1551. i,
  1552. ar->wmi.mem_chunks[i].len,
  1553. (unsigned long long)ar->wmi.mem_chunks[i].paddr,
  1554. ar->wmi.mem_chunks[i].req_id);
  1555. host_mem_chunks += sizeof(*tlv);
  1556. host_mem_chunks += sizeof(*chunk);
  1557. }
  1558. }
/* Build the WMI-TLV INIT command sent once at firmware bring-up.
 *
 * Message layout:
 *   [tlv][wmi_tlv_init_cmd]        - ABI version + chunk count
 *   [tlv][wmi_tlv_resource_config] - target resource limits
 *   [tlv][array of host memory chunk TLVs]
 *
 * Returns the filled skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	void *chunks;
	size_t len, chunks_len;
	void *ptr;

	/* Each chunk contributes its own TLV header plus payload. */
	chunks_len = ar->wmi.num_mem_chunks *
		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	/* First TLV: the init command itself. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Second TLV: the resource configuration. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	/* Third TLV: array of host memory chunks, filled in at the end. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	/* Resource limits: hw_params overrides win over TLV defaults. */
	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);

	if (ar->hw_params.num_peers)
		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
	else
		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);

	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	if (ar->hw_params.num_peers)
		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
	else
		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	/* Per-priority rx timeouts; values are firmware-defined defaults. */
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
	cfg->wmi_send_separate = __cpu_to_le32(0);
	cfg->num_ocb_vdevs = __cpu_to_le32(0);
	cfg->num_ocb_channels = __cpu_to_le32(0);
	cfg->num_ocb_schedules = __cpu_to_le32(0);
	/* Host capabilities advertised to firmware; both values below are
	 * already little-endian so the OR is endian-safe.
	 */
	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);

	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}
/* Build a WMI-TLV start-scan command from @arg.
 *
 * Message layout:
 *   [tlv][wmi_tlv_start_scan_cmd]
 *   [tlv][u32 channel list]
 *   [tlv][wmi_ssid list]
 *   [tlv][wmi_mac_addr bssid list]
 *   [tlv][IE bytes, padded to 4]
 *
 * Returns the filled skb, ERR_PTR(-ENOMEM), or the error from argument
 * verification.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
				 const struct wmi_start_scan_arg *arg)
{
	struct wmi_tlv_start_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, chan_len, ssid_len, bssid_len, ie_len;
	__le32 *chans;
	struct wmi_ssid *ssids;
	struct wmi_mac_addr *addrs;
	void *ptr;
	int i, ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ERR_PTR(ret);

	chan_len = arg->n_channels * sizeof(__le32);
	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
	ie_len = roundup(arg->ie_len, 4);	/* TLV payloads are 4-byte aligned */
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      sizeof(*tlv) + chan_len +
	      sizeof(*tlv) + ssid_len +
	      sizeof(*tlv) + bssid_len +
	      sizeof(*tlv) + ie_len;

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
	cmd->num_channels = __cpu_to_le32(arg->n_channels);
	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
	cmd->ie_len = __cpu_to_le32(arg->ie_len);
	cmd->num_probes = __cpu_to_le32(3);
	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);

	/* FIXME: There are some scan flag inconsistencies across firmwares,
	 * e.g. WMI-TLV inverts the logic behind the following flag.
	 */
	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Channel list as an array of little-endian u32 frequencies. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(chan_len);
	chans = (void *)tlv->value;
	for (i = 0; i < arg->n_channels; i++)
		chans[i] = __cpu_to_le32(arg->channels[i]);

	ptr += sizeof(*tlv);
	ptr += chan_len;

	/* SSID list. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(ssid_len);
	ssids = (void *)tlv->value;
	for (i = 0; i < arg->n_ssids; i++) {
		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
	}

	ptr += sizeof(*tlv);
	ptr += ssid_len;

	/* BSSID list. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(bssid_len);
	addrs = (void *)tlv->value;
	for (i = 0; i < arg->n_bssids; i++)
		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);

	ptr += sizeof(*tlv);
	ptr += bssid_len;

	/* Raw probe-request IEs; trailing pad bytes stay zeroed. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ie_len);
	memcpy(tlv->value, arg->ie, arg->ie_len);

	ptr += sizeof(*tlv);
	ptr += ie_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
	return skb;
}
  1746. static struct sk_buff *
  1747. ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
  1748. const struct wmi_stop_scan_arg *arg)
  1749. {
  1750. struct wmi_stop_scan_cmd *cmd;
  1751. struct wmi_tlv *tlv;
  1752. struct sk_buff *skb;
  1753. u32 scan_id;
  1754. u32 req_id;
  1755. if (arg->req_id > 0xFFF)
  1756. return ERR_PTR(-EINVAL);
  1757. if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
  1758. return ERR_PTR(-EINVAL);
  1759. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1760. if (!skb)
  1761. return ERR_PTR(-ENOMEM);
  1762. scan_id = arg->u.scan_id;
  1763. scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
  1764. req_id = arg->req_id;
  1765. req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
  1766. tlv = (void *)skb->data;
  1767. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
  1768. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1769. cmd = (void *)tlv->value;
  1770. cmd->req_type = __cpu_to_le32(arg->req_type);
  1771. cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
  1772. cmd->scan_id = __cpu_to_le32(scan_id);
  1773. cmd->scan_req_id = __cpu_to_le32(req_id);
  1774. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
  1775. return skb;
  1776. }
  1777. static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
  1778. enum wmi_vdev_subtype subtype)
  1779. {
  1780. switch (subtype) {
  1781. case WMI_VDEV_SUBTYPE_NONE:
  1782. return WMI_TLV_VDEV_SUBTYPE_NONE;
  1783. case WMI_VDEV_SUBTYPE_P2P_DEVICE:
  1784. return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
  1785. case WMI_VDEV_SUBTYPE_P2P_CLIENT:
  1786. return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
  1787. case WMI_VDEV_SUBTYPE_P2P_GO:
  1788. return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
  1789. case WMI_VDEV_SUBTYPE_PROXY_STA:
  1790. return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
  1791. case WMI_VDEV_SUBTYPE_MESH_11S:
  1792. return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
  1793. case WMI_VDEV_SUBTYPE_MESH_NON_11S:
  1794. return -ENOTSUPP;
  1795. }
  1796. return -ENOTSUPP;
  1797. }
  1798. static struct sk_buff *
  1799. ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
  1800. u32 vdev_id,
  1801. enum wmi_vdev_type vdev_type,
  1802. enum wmi_vdev_subtype vdev_subtype,
  1803. const u8 mac_addr[ETH_ALEN])
  1804. {
  1805. struct wmi_vdev_create_cmd *cmd;
  1806. struct wmi_tlv *tlv;
  1807. struct sk_buff *skb;
  1808. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1809. if (!skb)
  1810. return ERR_PTR(-ENOMEM);
  1811. tlv = (void *)skb->data;
  1812. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
  1813. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1814. cmd = (void *)tlv->value;
  1815. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1816. cmd->vdev_type = __cpu_to_le32(vdev_type);
  1817. cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
  1818. ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
  1819. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
  1820. return skb;
  1821. }
  1822. static struct sk_buff *
  1823. ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
  1824. {
  1825. struct wmi_vdev_delete_cmd *cmd;
  1826. struct wmi_tlv *tlv;
  1827. struct sk_buff *skb;
  1828. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1829. if (!skb)
  1830. return ERR_PTR(-ENOMEM);
  1831. tlv = (void *)skb->data;
  1832. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
  1833. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1834. cmd = (void *)tlv->value;
  1835. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1836. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
  1837. return skb;
  1838. }
/* Build a vdev start (or restart) request from @arg.
 *
 * Message layout:
 *   [tlv][wmi_tlv_vdev_start_cmd]
 *   [tlv][wmi_channel]
 *   [tlv][empty array]  - placeholder for P2P NoA descriptors
 *
 * @restart is currently unused by the TLV encoding.
 * Returns the filled skb, or ERR_PTR(-EINVAL/-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	/* A hidden SSID requires an SSID; the SSID must fit the command. */
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);

	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);	/* empty NoA descriptor array */
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */
	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}
  1901. static struct sk_buff *
  1902. ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
  1903. {
  1904. struct wmi_vdev_stop_cmd *cmd;
  1905. struct wmi_tlv *tlv;
  1906. struct sk_buff *skb;
  1907. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1908. if (!skb)
  1909. return ERR_PTR(-ENOMEM);
  1910. tlv = (void *)skb->data;
  1911. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
  1912. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1913. cmd = (void *)tlv->value;
  1914. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1915. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
  1916. return skb;
  1917. }
  1918. static struct sk_buff *
  1919. ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
  1920. const u8 *bssid)
  1921. {
  1922. struct wmi_vdev_up_cmd *cmd;
  1923. struct wmi_tlv *tlv;
  1924. struct sk_buff *skb;
  1925. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1926. if (!skb)
  1927. return ERR_PTR(-ENOMEM);
  1928. tlv = (void *)skb->data;
  1929. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
  1930. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1931. cmd = (void *)tlv->value;
  1932. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1933. cmd->vdev_assoc_id = __cpu_to_le32(aid);
  1934. ether_addr_copy(cmd->vdev_bssid.addr, bssid);
  1935. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
  1936. return skb;
  1937. }
  1938. static struct sk_buff *
  1939. ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
  1940. {
  1941. struct wmi_vdev_down_cmd *cmd;
  1942. struct wmi_tlv *tlv;
  1943. struct sk_buff *skb;
  1944. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1945. if (!skb)
  1946. return ERR_PTR(-ENOMEM);
  1947. tlv = (void *)skb->data;
  1948. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
  1949. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1950. cmd = (void *)tlv->value;
  1951. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1952. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
  1953. return skb;
  1954. }
  1955. static struct sk_buff *
  1956. ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
  1957. u32 param_id, u32 param_value)
  1958. {
  1959. struct wmi_vdev_set_param_cmd *cmd;
  1960. struct wmi_tlv *tlv;
  1961. struct sk_buff *skb;
  1962. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  1963. if (!skb)
  1964. return ERR_PTR(-ENOMEM);
  1965. tlv = (void *)skb->data;
  1966. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
  1967. tlv->len = __cpu_to_le16(sizeof(*cmd));
  1968. cmd = (void *)tlv->value;
  1969. cmd->vdev_id = __cpu_to_le32(vdev_id);
  1970. cmd->param_id = __cpu_to_le32(param_id);
  1971. cmd->param_value = __cpu_to_le32(param_value);
  1972. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
  1973. vdev_id, param_id, param_value);
  1974. return skb;
  1975. }
/* Build a vdev install-key command from @arg.
 *
 * Message layout:
 *   [tlv][wmi_vdev_install_key_cmd]
 *   [tlv][key bytes, padded to 4]
 *
 * Key material must be consistent with the cipher: CIPHER_NONE forbids
 * key data, every other cipher requires it.  Returns the filled skb or
 * ERR_PTR(-EINVAL/-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    arg->key_data)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    !arg->key_data)
		return ERR_PTR(-EINVAL);

	/* Key bytes are padded to a 4-byte TLV boundary. */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
	cmd->key_flags = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
	cmd->key_len = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	/* macaddr is optional, e.g. for group keys. */
	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
	if (arg->key_data)
		memcpy(tlv->value, arg->key_data, arg->key_len);

	ptr += sizeof(*tlv);
	ptr += roundup(arg->key_len, sizeof(__le32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
	return skb;
}
  2022. static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
  2023. const struct wmi_sta_uapsd_auto_trig_arg *arg)
  2024. {
  2025. struct wmi_sta_uapsd_auto_trig_param *ac;
  2026. struct wmi_tlv *tlv;
  2027. tlv = ptr;
  2028. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
  2029. tlv->len = __cpu_to_le16(sizeof(*ac));
  2030. ac = (void *)tlv->value;
  2031. ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
  2032. ac->user_priority = __cpu_to_le32(arg->user_priority);
  2033. ac->service_interval = __cpu_to_le32(arg->service_interval);
  2034. ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
  2035. ac->delay_interval = __cpu_to_le32(arg->delay_interval);
  2036. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2037. "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
  2038. ac->wmm_ac, ac->user_priority, ac->service_interval,
  2039. ac->suspend_interval, ac->delay_interval);
  2040. return ptr + sizeof(*tlv) + sizeof(*ac);
  2041. }
  2042. static struct sk_buff *
  2043. ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
  2044. const u8 peer_addr[ETH_ALEN],
  2045. const struct wmi_sta_uapsd_auto_trig_arg *args,
  2046. u32 num_ac)
  2047. {
  2048. struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
  2049. struct wmi_sta_uapsd_auto_trig_param *ac;
  2050. struct wmi_tlv *tlv;
  2051. struct sk_buff *skb;
  2052. size_t len;
  2053. size_t ac_tlv_len;
  2054. void *ptr;
  2055. int i;
  2056. ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
  2057. len = sizeof(*tlv) + sizeof(*cmd) +
  2058. sizeof(*tlv) + ac_tlv_len;
  2059. skb = ath10k_wmi_alloc_skb(ar, len);
  2060. if (!skb)
  2061. return ERR_PTR(-ENOMEM);
  2062. ptr = (void *)skb->data;
  2063. tlv = ptr;
  2064. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
  2065. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2066. cmd = (void *)tlv->value;
  2067. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2068. cmd->num_ac = __cpu_to_le32(num_ac);
  2069. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  2070. ptr += sizeof(*tlv);
  2071. ptr += sizeof(*cmd);
  2072. tlv = ptr;
  2073. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
  2074. tlv->len = __cpu_to_le16(ac_tlv_len);
  2075. ac = (void *)tlv->value;
  2076. ptr += sizeof(*tlv);
  2077. for (i = 0; i < num_ac; i++)
  2078. ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
  2079. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
  2080. return skb;
  2081. }
  2082. static void *ath10k_wmi_tlv_put_wmm(void *ptr,
  2083. const struct wmi_wmm_params_arg *arg)
  2084. {
  2085. struct wmi_wmm_params *wmm;
  2086. struct wmi_tlv *tlv;
  2087. tlv = ptr;
  2088. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
  2089. tlv->len = __cpu_to_le16(sizeof(*wmm));
  2090. wmm = (void *)tlv->value;
  2091. ath10k_wmi_set_wmm_param(wmm, arg);
  2092. return ptr + sizeof(*tlv) + sizeof(*wmm);
  2093. }
  2094. static struct sk_buff *
  2095. ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
  2096. const struct wmi_wmm_params_all_arg *arg)
  2097. {
  2098. struct wmi_tlv_vdev_set_wmm_cmd *cmd;
  2099. struct wmi_tlv *tlv;
  2100. struct sk_buff *skb;
  2101. size_t len;
  2102. void *ptr;
  2103. len = sizeof(*tlv) + sizeof(*cmd);
  2104. skb = ath10k_wmi_alloc_skb(ar, len);
  2105. if (!skb)
  2106. return ERR_PTR(-ENOMEM);
  2107. ptr = (void *)skb->data;
  2108. tlv = ptr;
  2109. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
  2110. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2111. cmd = (void *)tlv->value;
  2112. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2113. ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
  2114. ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
  2115. ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
  2116. ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
  2117. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
  2118. return skb;
  2119. }
  2120. static struct sk_buff *
  2121. ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
  2122. const struct wmi_sta_keepalive_arg *arg)
  2123. {
  2124. struct wmi_tlv_sta_keepalive_cmd *cmd;
  2125. struct wmi_sta_keepalive_arp_resp *arp;
  2126. struct sk_buff *skb;
  2127. struct wmi_tlv *tlv;
  2128. void *ptr;
  2129. size_t len;
  2130. len = sizeof(*tlv) + sizeof(*cmd) +
  2131. sizeof(*tlv) + sizeof(*arp);
  2132. skb = ath10k_wmi_alloc_skb(ar, len);
  2133. if (!skb)
  2134. return ERR_PTR(-ENOMEM);
  2135. ptr = (void *)skb->data;
  2136. tlv = ptr;
  2137. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
  2138. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2139. cmd = (void *)tlv->value;
  2140. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  2141. cmd->enabled = __cpu_to_le32(arg->enabled);
  2142. cmd->method = __cpu_to_le32(arg->method);
  2143. cmd->interval = __cpu_to_le32(arg->interval);
  2144. ptr += sizeof(*tlv);
  2145. ptr += sizeof(*cmd);
  2146. tlv = ptr;
  2147. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
  2148. tlv->len = __cpu_to_le16(sizeof(*arp));
  2149. arp = (void *)tlv->value;
  2150. arp->src_ip4_addr = arg->src_ip4_addr;
  2151. arp->dest_ip4_addr = arg->dest_ip4_addr;
  2152. ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
  2153. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
  2154. arg->vdev_id, arg->enabled, arg->method, arg->interval);
  2155. return skb;
  2156. }
  2157. static struct sk_buff *
  2158. ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
  2159. const u8 peer_addr[ETH_ALEN],
  2160. enum wmi_peer_type peer_type)
  2161. {
  2162. struct wmi_tlv_peer_create_cmd *cmd;
  2163. struct wmi_tlv *tlv;
  2164. struct sk_buff *skb;
  2165. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2166. if (!skb)
  2167. return ERR_PTR(-ENOMEM);
  2168. tlv = (void *)skb->data;
  2169. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
  2170. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2171. cmd = (void *)tlv->value;
  2172. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2173. cmd->peer_type = __cpu_to_le32(peer_type);
  2174. ether_addr_copy(cmd->peer_addr.addr, peer_addr);
  2175. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
  2176. return skb;
  2177. }
  2178. static struct sk_buff *
  2179. ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
  2180. const u8 peer_addr[ETH_ALEN])
  2181. {
  2182. struct wmi_peer_delete_cmd *cmd;
  2183. struct wmi_tlv *tlv;
  2184. struct sk_buff *skb;
  2185. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2186. if (!skb)
  2187. return ERR_PTR(-ENOMEM);
  2188. tlv = (void *)skb->data;
  2189. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
  2190. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2191. cmd = (void *)tlv->value;
  2192. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2193. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  2194. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
  2195. return skb;
  2196. }
  2197. static struct sk_buff *
  2198. ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
  2199. const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
  2200. {
  2201. struct wmi_peer_flush_tids_cmd *cmd;
  2202. struct wmi_tlv *tlv;
  2203. struct sk_buff *skb;
  2204. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2205. if (!skb)
  2206. return ERR_PTR(-ENOMEM);
  2207. tlv = (void *)skb->data;
  2208. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
  2209. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2210. cmd = (void *)tlv->value;
  2211. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2212. cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
  2213. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  2214. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
  2215. return skb;
  2216. }
  2217. static struct sk_buff *
  2218. ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
  2219. const u8 *peer_addr,
  2220. enum wmi_peer_param param_id,
  2221. u32 param_value)
  2222. {
  2223. struct wmi_peer_set_param_cmd *cmd;
  2224. struct wmi_tlv *tlv;
  2225. struct sk_buff *skb;
  2226. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2227. if (!skb)
  2228. return ERR_PTR(-ENOMEM);
  2229. tlv = (void *)skb->data;
  2230. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
  2231. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2232. cmd = (void *)tlv->value;
  2233. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2234. cmd->param_id = __cpu_to_le32(param_id);
  2235. cmd->param_value = __cpu_to_le32(param_value);
  2236. ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
  2237. ath10k_dbg(ar, ATH10K_DBG_WMI,
  2238. "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
  2239. vdev_id, peer_addr, param_id, param_value);
  2240. return skb;
  2241. }
/* Build a WMI TLV peer-assoc command from @arg.
 *
 * TLV layout, in order: assoc-complete command, legacy-rates byte array,
 * HT-rates byte array, VHT rate set struct.
 *
 * Returns the command skb, ERR_PTR(-EINVAL) on out-of-range input or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
				 const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_tlv_peer_assoc_cmd *cmd;
	struct wmi_vht_rate_set *vht_rate;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, legacy_rate_len, ht_rate_len;
	void *ptr;

	/* validate inputs before sizing/allocating the buffer */
	if (arg->peer_mpdu_density > 16)
		return ERR_PTR(-EINVAL);
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return ERR_PTR(-EINVAL);

	/* rate byte arrays are padded up to 4-byte TLV alignment; only
	 * num_rates bytes are actually copied below
	 */
	legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
				  sizeof(__le32));
	ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + legacy_rate_len) +
	      (sizeof(*tlv) + ht_rate_len) +
	      (sizeof(*tlv) + sizeof(*vht_rate));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;

	/* TLV 1: assoc-complete command body */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
	cmd->flags = __cpu_to_le32(arg->peer_flags);
	cmd->caps = __cpu_to_le32(arg->peer_caps);
	cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
	cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
	cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
	cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
	cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
	cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
	ether_addr_copy(cmd->mac_addr.addr, arg->addr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* TLV 2: legacy rates byte array */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(legacy_rate_len);
	memcpy(tlv->value, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += legacy_rate_len;

	/* TLV 3: HT rates byte array */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ht_rate_len);
	memcpy(tlv->value, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	ptr += sizeof(*tlv);
	ptr += ht_rate_len;

	/* TLV 4: VHT rate set */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
	tlv->len = __cpu_to_le16(sizeof(*vht_rate));
	vht_rate = (void *)tlv->value;

	vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);

	ptr += sizeof(*tlv);
	ptr += sizeof(*vht_rate);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
	return skb;
}
  2318. static struct sk_buff *
  2319. ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
  2320. enum wmi_sta_ps_mode psmode)
  2321. {
  2322. struct wmi_sta_powersave_mode_cmd *cmd;
  2323. struct wmi_tlv *tlv;
  2324. struct sk_buff *skb;
  2325. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2326. if (!skb)
  2327. return ERR_PTR(-ENOMEM);
  2328. tlv = (void *)skb->data;
  2329. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
  2330. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2331. cmd = (void *)tlv->value;
  2332. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2333. cmd->sta_ps_mode = __cpu_to_le32(psmode);
  2334. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
  2335. return skb;
  2336. }
  2337. static struct sk_buff *
  2338. ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
  2339. enum wmi_sta_powersave_param param_id,
  2340. u32 param_value)
  2341. {
  2342. struct wmi_sta_powersave_param_cmd *cmd;
  2343. struct wmi_tlv *tlv;
  2344. struct sk_buff *skb;
  2345. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2346. if (!skb)
  2347. return ERR_PTR(-ENOMEM);
  2348. tlv = (void *)skb->data;
  2349. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
  2350. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2351. cmd = (void *)tlv->value;
  2352. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2353. cmd->param_id = __cpu_to_le32(param_id);
  2354. cmd->param_value = __cpu_to_le32(param_value);
  2355. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
  2356. return skb;
  2357. }
  2358. static struct sk_buff *
  2359. ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
  2360. enum wmi_ap_ps_peer_param param_id, u32 value)
  2361. {
  2362. struct wmi_ap_ps_peer_cmd *cmd;
  2363. struct wmi_tlv *tlv;
  2364. struct sk_buff *skb;
  2365. if (!mac)
  2366. return ERR_PTR(-EINVAL);
  2367. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2368. if (!skb)
  2369. return ERR_PTR(-ENOMEM);
  2370. tlv = (void *)skb->data;
  2371. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
  2372. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2373. cmd = (void *)tlv->value;
  2374. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2375. cmd->param_id = __cpu_to_le32(param_id);
  2376. cmd->param_value = __cpu_to_le32(value);
  2377. ether_addr_copy(cmd->peer_macaddr.addr, mac);
  2378. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
  2379. return skb;
  2380. }
/* Build a WMI TLV scan-channel-list command.
 *
 * Layout: scan-chan-list command TLV, then an ARRAY_STRUCT TLV whose
 * payload is one nested channel TLV per channel in @arg.
 *
 * Returns the command skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
				     const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_tlv_scan_chan_list_cmd *cmd;
	struct wmi_channel *ci;
	struct wmi_channel_arg *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t chans_len, len;
	int i;
	void *ptr, *chans;

	/* each channel contributes a nested TLV header plus its payload */
	chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + chans_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* outer array TLV wrapping the per-channel TLVs */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chans_len);
	chans = (void *)tlv->value;

	/* chans walks the array payload; ptr still points at the outer TLV */
	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];

		tlv = chans;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*ci));
		ci = (void *)tlv->value;
		ath10k_wmi_put_wmi_channel(ar, ci, ch);

		chans += sizeof(*tlv);
		chans += sizeof(*ci);
	}

	ptr += sizeof(*tlv);
	ptr += chans_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
	return skb;
}
  2426. static struct sk_buff *
  2427. ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
  2428. {
  2429. struct wmi_scan_prob_req_oui_cmd *cmd;
  2430. struct wmi_tlv *tlv;
  2431. struct sk_buff *skb;
  2432. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2433. if (!skb)
  2434. return ERR_PTR(-ENOMEM);
  2435. tlv = (void *)skb->data;
  2436. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
  2437. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2438. cmd = (void *)tlv->value;
  2439. cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
  2440. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
  2441. return skb;
  2442. }
  2443. static struct sk_buff *
  2444. ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
  2445. const void *bcn, size_t bcn_len,
  2446. u32 bcn_paddr, bool dtim_zero,
  2447. bool deliver_cab)
  2448. {
  2449. struct wmi_bcn_tx_ref_cmd *cmd;
  2450. struct wmi_tlv *tlv;
  2451. struct sk_buff *skb;
  2452. struct ieee80211_hdr *hdr;
  2453. u16 fc;
  2454. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2455. if (!skb)
  2456. return ERR_PTR(-ENOMEM);
  2457. hdr = (struct ieee80211_hdr *)bcn;
  2458. fc = le16_to_cpu(hdr->frame_control);
  2459. tlv = (void *)skb->data;
  2460. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
  2461. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2462. cmd = (void *)tlv->value;
  2463. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2464. cmd->data_len = __cpu_to_le32(bcn_len);
  2465. cmd->data_ptr = __cpu_to_le32(bcn_paddr);
  2466. cmd->msdu_id = 0;
  2467. cmd->frame_control = __cpu_to_le32(fc);
  2468. cmd->flags = 0;
  2469. if (dtim_zero)
  2470. cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
  2471. if (deliver_cab)
  2472. cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
  2473. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
  2474. return skb;
  2475. }
  2476. static struct sk_buff *
  2477. ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
  2478. const struct wmi_wmm_params_all_arg *arg)
  2479. {
  2480. struct wmi_tlv_pdev_set_wmm_cmd *cmd;
  2481. struct wmi_wmm_params *wmm;
  2482. struct wmi_tlv *tlv;
  2483. struct sk_buff *skb;
  2484. size_t len;
  2485. void *ptr;
  2486. len = (sizeof(*tlv) + sizeof(*cmd)) +
  2487. (4 * (sizeof(*tlv) + sizeof(*wmm)));
  2488. skb = ath10k_wmi_alloc_skb(ar, len);
  2489. if (!skb)
  2490. return ERR_PTR(-ENOMEM);
  2491. ptr = (void *)skb->data;
  2492. tlv = ptr;
  2493. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
  2494. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2495. cmd = (void *)tlv->value;
  2496. /* nothing to set here */
  2497. ptr += sizeof(*tlv);
  2498. ptr += sizeof(*cmd);
  2499. ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
  2500. ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
  2501. ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
  2502. ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
  2503. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
  2504. return skb;
  2505. }
  2506. static struct sk_buff *
  2507. ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
  2508. {
  2509. struct wmi_request_stats_cmd *cmd;
  2510. struct wmi_tlv *tlv;
  2511. struct sk_buff *skb;
  2512. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2513. if (!skb)
  2514. return ERR_PTR(-ENOMEM);
  2515. tlv = (void *)skb->data;
  2516. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
  2517. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2518. cmd = (void *)tlv->value;
  2519. cmd->stats_id = __cpu_to_le32(stats_mask);
  2520. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
  2521. return skb;
  2522. }
  2523. static struct sk_buff *
  2524. ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
  2525. u32 vdev_id,
  2526. enum wmi_peer_stats_info_request_type type,
  2527. u8 *addr,
  2528. u32 reset)
  2529. {
  2530. struct wmi_tlv_request_peer_stats_info *cmd;
  2531. struct wmi_tlv *tlv;
  2532. struct sk_buff *skb;
  2533. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2534. if (!skb)
  2535. return ERR_PTR(-ENOMEM);
  2536. tlv = (void *)skb->data;
  2537. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
  2538. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2539. cmd = (void *)tlv->value;
  2540. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2541. cmd->request_type = __cpu_to_le32(type);
  2542. if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
  2543. ether_addr_copy(cmd->peer_macaddr.addr, addr);
  2544. cmd->reset_after_request = __cpu_to_le32(reset);
  2545. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
  2546. return skb;
  2547. }
  2548. static int
  2549. ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
  2550. struct sk_buff *msdu)
  2551. {
  2552. struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
  2553. struct ath10k_wmi *wmi = &ar->wmi;
  2554. idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
  2555. return 0;
  2556. }
  2557. static int
  2558. ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
  2559. dma_addr_t paddr)
  2560. {
  2561. struct ath10k_wmi *wmi = &ar->wmi;
  2562. struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
  2563. int ret;
  2564. pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
  2565. if (!pkt_addr)
  2566. return -ENOMEM;
  2567. pkt_addr->vaddr = skb;
  2568. pkt_addr->paddr = paddr;
  2569. spin_lock_bh(&ar->data_lock);
  2570. ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
  2571. wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
  2572. spin_unlock_bh(&ar->data_lock);
  2573. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
  2574. return ret;
  2575. }
/* Build a WMI TLV mgmt-tx command describing @msdu (already DMA-mapped
 * at @paddr) and register it in the pending-tx IDR.
 *
 * Layout: mgmt-tx command TLV, then a byte-array TLV holding a copy of
 * the frame (bounded by WMI_TLV_MGMT_TX_FRAME_MAX_LEN, 4-byte padded).
 *
 * Returns the command skb, ERR_PTR(-EINVAL) for a missing vif or a
 * non-mgmt/non-nullfunc frame, ERR_PTR(-ENOMEM) on allocation failure,
 * or the negative idr error as an ERR_PTR.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
				   dma_addr_t paddr)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
	struct wmi_tlv_mgmt_tx_cmd *cmd;
	struct ieee80211_hdr *hdr;
	struct ath10k_vif *arvif;
	u32 buf_len = msdu->len;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int len, desc_id;
	u32 vdev_id;
	void *ptr;

	if (!cb->vif)
		return ERR_PTR(-EINVAL);

	hdr = (struct ieee80211_hdr *)msdu->data;
	arvif = (void *)cb->vif->drv_priv;
	vdev_id = arvif->vdev_id;

	/* only management and (QoS-)nullfunc frames may go this path */
	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
			 (!(ieee80211_is_nullfunc(hdr->frame_control) ||
			    ieee80211_is_qos_nullfunc(hdr->frame_control)))))
		return ERR_PTR(-EINVAL);

	len = sizeof(*cmd) + 2 * sizeof(*tlv);

	/* protected robust mgmt frames need room for the CCMP MIC the
	 * firmware appends; account for it in the copied length too
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	    ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		buf_len += IEEE80211_CCMP_MIC_LEN;
	}

	/* clamp the copied frame and pad both TLV payload and total
	 * command to 4-byte alignment
	 */
	buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
	buf_len = round_up(buf_len, 4);

	len += buf_len;
	len = round_up(len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
	if (desc_id < 0)
		goto err_free_skb;

	cb->msdu_id = desc_id;

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->desc_id = __cpu_to_le32(desc_id);
	cmd->chanfreq = 0;
	cmd->buf_len = __cpu_to_le32(buf_len);
	cmd->frame_len = __cpu_to_le32(msdu->len);
	cmd->paddr = __cpu_to_le64(paddr);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(buf_len);

	ptr += sizeof(*tlv);
	memcpy(ptr, msdu->data, buf_len);

	return skb;

err_free_skb:
	dev_kfree_skb(skb);
	/* desc_id holds the negative idr error at this point */
	return ERR_PTR(desc_id);
}
  2641. static struct sk_buff *
  2642. ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
  2643. enum wmi_force_fw_hang_type type,
  2644. u32 delay_ms)
  2645. {
  2646. struct wmi_force_fw_hang_cmd *cmd;
  2647. struct wmi_tlv *tlv;
  2648. struct sk_buff *skb;
  2649. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2650. if (!skb)
  2651. return ERR_PTR(-ENOMEM);
  2652. tlv = (void *)skb->data;
  2653. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
  2654. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2655. cmd = (void *)tlv->value;
  2656. cmd->type = __cpu_to_le32(type);
  2657. cmd->delay_ms = __cpu_to_le32(delay_ms);
  2658. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
  2659. return skb;
  2660. }
  2661. static struct sk_buff *
  2662. ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
  2663. u32 log_level)
  2664. {
  2665. struct wmi_tlv_dbglog_cmd *cmd;
  2666. struct wmi_tlv *tlv;
  2667. struct sk_buff *skb;
  2668. size_t len, bmap_len;
  2669. u32 value;
  2670. void *ptr;
  2671. if (module_enable) {
  2672. value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
  2673. module_enable,
  2674. WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
  2675. } else {
  2676. value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
  2677. WMI_TLV_DBGLOG_ALL_MODULES,
  2678. WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
  2679. }
  2680. bmap_len = 0;
  2681. len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
  2682. skb = ath10k_wmi_alloc_skb(ar, len);
  2683. if (!skb)
  2684. return ERR_PTR(-ENOMEM);
  2685. ptr = (void *)skb->data;
  2686. tlv = ptr;
  2687. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
  2688. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2689. cmd = (void *)tlv->value;
  2690. cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
  2691. cmd->value = __cpu_to_le32(value);
  2692. ptr += sizeof(*tlv);
  2693. ptr += sizeof(*cmd);
  2694. tlv = ptr;
  2695. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
  2696. tlv->len = __cpu_to_le16(bmap_len);
  2697. /* nothing to do here */
  2698. ptr += sizeof(*tlv);
  2699. ptr += sizeof(bmap_len);
  2700. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
  2701. return skb;
  2702. }
  2703. static struct sk_buff *
  2704. ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
  2705. {
  2706. struct wmi_tlv_pktlog_enable *cmd;
  2707. struct wmi_tlv *tlv;
  2708. struct sk_buff *skb;
  2709. void *ptr;
  2710. size_t len;
  2711. len = sizeof(*tlv) + sizeof(*cmd);
  2712. skb = ath10k_wmi_alloc_skb(ar, len);
  2713. if (!skb)
  2714. return ERR_PTR(-ENOMEM);
  2715. ptr = (void *)skb->data;
  2716. tlv = ptr;
  2717. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
  2718. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2719. cmd = (void *)tlv->value;
  2720. cmd->filter = __cpu_to_le32(filter);
  2721. ptr += sizeof(*tlv);
  2722. ptr += sizeof(*cmd);
  2723. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
  2724. filter);
  2725. return skb;
  2726. }
  2727. static struct sk_buff *
  2728. ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
  2729. {
  2730. struct wmi_tlv_pdev_get_temp_cmd *cmd;
  2731. struct wmi_tlv *tlv;
  2732. struct sk_buff *skb;
  2733. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  2734. if (!skb)
  2735. return ERR_PTR(-ENOMEM);
  2736. tlv = (void *)skb->data;
  2737. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
  2738. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2739. cmd = (void *)tlv->value;
  2740. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
  2741. return skb;
  2742. }
  2743. static struct sk_buff *
  2744. ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
  2745. {
  2746. struct wmi_tlv_pktlog_disable *cmd;
  2747. struct wmi_tlv *tlv;
  2748. struct sk_buff *skb;
  2749. void *ptr;
  2750. size_t len;
  2751. len = sizeof(*tlv) + sizeof(*cmd);
  2752. skb = ath10k_wmi_alloc_skb(ar, len);
  2753. if (!skb)
  2754. return ERR_PTR(-ENOMEM);
  2755. ptr = (void *)skb->data;
  2756. tlv = ptr;
  2757. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
  2758. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2759. cmd = (void *)tlv->value;
  2760. ptr += sizeof(*tlv);
  2761. ptr += sizeof(*cmd);
  2762. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
  2763. return skb;
  2764. }
/* Build a WMI TLV beacon-template command.
 *
 * Layout: bcn-tmpl command TLV, probe-response info TLV (caps/erp plus
 * @prb_ies), and a byte-array TLV with the 4-byte-padded beacon.
 *
 * Returns the command skb, ERR_PTR(-EINVAL) for inconsistent probe-IE
 * arguments, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
			       u32 tim_ie_offset, struct sk_buff *bcn,
			       u32 prb_caps, u32 prb_erp, void *prb_ies,
			       size_t prb_ies_len)
{
	struct wmi_tlv_bcn_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* a non-zero IE length with no IE buffer is a caller bug */
	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
		return ERR_PTR(-EINVAL);

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
	      sizeof(*tlv) + roundup(bcn->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
	cmd->buf_len = __cpu_to_le32(bcn->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should be probably aligned to 4byte boundary but
	 * then it is then impossible to pass original ie len.
	 * This chunk is not used yet so if setting probe resp template yields
	 * problems with beaconing or crashes firmware look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	/* beacon bytes, TLV length padded up; only bcn->len bytes copied */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}
  2819. static struct sk_buff *
  2820. ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
  2821. struct sk_buff *prb)
  2822. {
  2823. struct wmi_tlv_prb_tmpl_cmd *cmd;
  2824. struct wmi_tlv_bcn_prb_info *info;
  2825. struct wmi_tlv *tlv;
  2826. struct sk_buff *skb;
  2827. void *ptr;
  2828. size_t len;
  2829. len = sizeof(*tlv) + sizeof(*cmd) +
  2830. sizeof(*tlv) + sizeof(*info) +
  2831. sizeof(*tlv) + roundup(prb->len, 4);
  2832. skb = ath10k_wmi_alloc_skb(ar, len);
  2833. if (!skb)
  2834. return ERR_PTR(-ENOMEM);
  2835. ptr = (void *)skb->data;
  2836. tlv = ptr;
  2837. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
  2838. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2839. cmd = (void *)tlv->value;
  2840. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2841. cmd->buf_len = __cpu_to_le32(prb->len);
  2842. ptr += sizeof(*tlv);
  2843. ptr += sizeof(*cmd);
  2844. tlv = ptr;
  2845. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
  2846. tlv->len = __cpu_to_le16(sizeof(*info));
  2847. info = (void *)tlv->value;
  2848. info->caps = 0;
  2849. info->erp = 0;
  2850. ptr += sizeof(*tlv);
  2851. ptr += sizeof(*info);
  2852. tlv = ptr;
  2853. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  2854. tlv->len = __cpu_to_le16(roundup(prb->len, 4));
  2855. memcpy(tlv->value, prb->data, prb->len);
  2856. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
  2857. vdev_id);
  2858. return skb;
  2859. }
  2860. static struct sk_buff *
  2861. ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
  2862. const u8 *p2p_ie)
  2863. {
  2864. struct wmi_tlv_p2p_go_bcn_ie *cmd;
  2865. struct wmi_tlv *tlv;
  2866. struct sk_buff *skb;
  2867. void *ptr;
  2868. size_t len;
  2869. len = sizeof(*tlv) + sizeof(*cmd) +
  2870. sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
  2871. skb = ath10k_wmi_alloc_skb(ar, len);
  2872. if (!skb)
  2873. return ERR_PTR(-ENOMEM);
  2874. ptr = (void *)skb->data;
  2875. tlv = ptr;
  2876. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
  2877. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2878. cmd = (void *)tlv->value;
  2879. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2880. cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
  2881. ptr += sizeof(*tlv);
  2882. ptr += sizeof(*cmd);
  2883. tlv = ptr;
  2884. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
  2885. tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
  2886. memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
  2887. ptr += sizeof(*tlv);
  2888. ptr += roundup(p2p_ie[1] + 2, 4);
  2889. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
  2890. vdev_id);
  2891. return skb;
  2892. }
  2893. static struct sk_buff *
  2894. ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
  2895. enum wmi_tdls_state state)
  2896. {
  2897. struct wmi_tdls_set_state_cmd *cmd;
  2898. struct wmi_tlv *tlv;
  2899. struct sk_buff *skb;
  2900. void *ptr;
  2901. size_t len;
  2902. /* Set to options from wmi_tlv_tdls_options,
  2903. * for now none of them are enabled.
  2904. */
  2905. u32 options = 0;
  2906. if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
  2907. options |= WMI_TLV_TDLS_BUFFER_STA_EN;
  2908. /* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means firm will handle TDLS
  2909. * link inactivity detecting logic.
  2910. */
  2911. if (state == WMI_TDLS_ENABLE_ACTIVE)
  2912. state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
  2913. len = sizeof(*tlv) + sizeof(*cmd);
  2914. skb = ath10k_wmi_alloc_skb(ar, len);
  2915. if (!skb)
  2916. return ERR_PTR(-ENOMEM);
  2917. ptr = (void *)skb->data;
  2918. tlv = ptr;
  2919. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
  2920. tlv->len = __cpu_to_le16(sizeof(*cmd));
  2921. cmd = (void *)tlv->value;
  2922. cmd->vdev_id = __cpu_to_le32(vdev_id);
  2923. cmd->state = __cpu_to_le32(state);
  2924. cmd->notification_interval_ms = __cpu_to_le32(5000);
  2925. cmd->tx_discovery_threshold = __cpu_to_le32(100);
  2926. cmd->tx_teardown_threshold = __cpu_to_le32(5);
  2927. cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
  2928. cmd->rssi_delta = __cpu_to_le32(-20);
  2929. cmd->tdls_options = __cpu_to_le32(options);
  2930. cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
  2931. cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
  2932. cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
  2933. cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
  2934. cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
  2935. ptr += sizeof(*tlv);
  2936. ptr += sizeof(*cmd);
  2937. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
  2938. state, vdev_id);
  2939. return skb;
  2940. }
  2941. static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
  2942. {
  2943. u32 peer_qos = 0;
  2944. if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
  2945. peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
  2946. if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
  2947. peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
  2948. if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
  2949. peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
  2950. if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
  2951. peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
  2952. peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
  2953. return peer_qos;
  2954. }
/* Build the WMI TLV TDLS peer-update command: a fixed command TLV,
 * followed by a peer-capabilities TLV, followed by an array-of-struct
 * TLV holding one wmi_channel per peer channel.
 *
 * @arg: peer identity (vdev, MAC, state)
 * @cap: peer capability set; cap->peer_chan_len sizes the channel array
 * @chan_arg: array of cap->peer_chan_len channel descriptors
 *
 * Returns a ready-to-send skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
				       const struct wmi_tdls_peer_update_cmd_arg *arg,
				       const struct wmi_tdls_peer_capab_arg *cap,
				       const struct wmi_channel_arg *chan_arg)
{
	struct wmi_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capab *peer_cap;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 peer_qos;
	void *ptr;
	int len;
	int i;

	/* Total: cmd TLV + capability TLV + channel-array TLV header;
	 * each channel struct carries no per-entry TLV header here (the
	 * inner headers are written in the loop below over tlv->value).
	 */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*peer_cap) +
	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	/* TLV 1: the update command itself */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* TLV 2: peer capabilities */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
	peer_cap = (void *)tlv->value;
	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
						   cap->peer_max_sp);
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
	/* Operating classes are single bytes; copy the fixed-size table. */
	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];
	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
	ptr += sizeof(*tlv);
	ptr += sizeof(*peer_cap);

	/* TLV 3: array of per-channel TLVs */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
	ptr += sizeof(*tlv);

	for (i = 0; i < cap->peer_chan_len; i++) {
		tlv = ptr;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*chan));
		chan = (void *)tlv->value;
		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
		ptr += sizeof(*tlv);
		ptr += sizeof(*chan);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}
  3024. static struct sk_buff *
  3025. ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
  3026. u32 duration, u32 next_offset,
  3027. u32 enabled)
  3028. {
  3029. struct wmi_tlv_set_quiet_cmd *cmd;
  3030. struct wmi_tlv *tlv;
  3031. struct sk_buff *skb;
  3032. skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
  3033. if (!skb)
  3034. return ERR_PTR(-ENOMEM);
  3035. tlv = (void *)skb->data;
  3036. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
  3037. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3038. cmd = (void *)tlv->value;
  3039. /* vdev_id is not in use, set to 0 */
  3040. cmd->vdev_id = __cpu_to_le32(0);
  3041. cmd->period = __cpu_to_le32(period);
  3042. cmd->duration = __cpu_to_le32(duration);
  3043. cmd->next_start = __cpu_to_le32(next_offset);
  3044. cmd->enabled = __cpu_to_le32(enabled);
  3045. ath10k_dbg(ar, ATH10K_DBG_WMI,
  3046. "wmi tlv quiet param: period %u duration %u enabled %d\n",
  3047. period, duration, enabled);
  3048. return skb;
  3049. }
  3050. static struct sk_buff *
  3051. ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
  3052. {
  3053. struct wmi_tlv_wow_enable_cmd *cmd;
  3054. struct wmi_tlv *tlv;
  3055. struct sk_buff *skb;
  3056. size_t len;
  3057. len = sizeof(*tlv) + sizeof(*cmd);
  3058. skb = ath10k_wmi_alloc_skb(ar, len);
  3059. if (!skb)
  3060. return ERR_PTR(-ENOMEM);
  3061. tlv = (struct wmi_tlv *)skb->data;
  3062. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
  3063. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3064. cmd = (void *)tlv->value;
  3065. cmd->enable = __cpu_to_le32(1);
  3066. if (!ar->bus_param.link_can_suspend)
  3067. cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
  3068. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
  3069. return skb;
  3070. }
  3071. static struct sk_buff *
  3072. ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
  3073. u32 vdev_id,
  3074. enum wmi_wow_wakeup_event event,
  3075. u32 enable)
  3076. {
  3077. struct wmi_tlv_wow_add_del_event_cmd *cmd;
  3078. struct wmi_tlv *tlv;
  3079. struct sk_buff *skb;
  3080. size_t len;
  3081. len = sizeof(*tlv) + sizeof(*cmd);
  3082. skb = ath10k_wmi_alloc_skb(ar, len);
  3083. if (!skb)
  3084. return ERR_PTR(-ENOMEM);
  3085. tlv = (struct wmi_tlv *)skb->data;
  3086. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
  3087. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3088. cmd = (void *)tlv->value;
  3089. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3090. cmd->is_add = __cpu_to_le32(enable);
  3091. cmd->event_bitmap = __cpu_to_le32(1 << event);
  3092. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
  3093. wow_wakeup_event(event), enable, vdev_id);
  3094. return skb;
  3095. }
  3096. static struct sk_buff *
  3097. ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
  3098. {
  3099. struct wmi_tlv_wow_host_wakeup_ind *cmd;
  3100. struct wmi_tlv *tlv;
  3101. struct sk_buff *skb;
  3102. size_t len;
  3103. len = sizeof(*tlv) + sizeof(*cmd);
  3104. skb = ath10k_wmi_alloc_skb(ar, len);
  3105. if (!skb)
  3106. return ERR_PTR(-ENOMEM);
  3107. tlv = (struct wmi_tlv *)skb->data;
  3108. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
  3109. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3110. cmd = (void *)tlv->value;
  3111. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
  3112. return skb;
  3113. }
/* Build the WMI TLV command that installs a WoW bitmap wakeup pattern.
 *
 * The firmware interface requires the full set of pattern containers
 * even when only the bitmap pattern is used, so empty placeholder TLVs
 * are emitted for the IPv4/IPv6 sync, magic-packet, and timeout arrays.
 *
 * @pattern/@bitmask: pattern bytes and per-byte mask, both pattern_len
 *	long; caller must keep pattern_len within the firmware buffer
 *	sizes (not re-checked here).
 * @pattern_offset: offset in the frame where matching starts.
 *
 * Returns a ready-to-send skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id, const u8 *pattern,
				      const u8 *bitmask, int pattern_len,
				      int pattern_offset)
{
	struct wmi_tlv_wow_add_pattern_cmd *cmd;
	struct wmi_tlv_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* The length must account for every TLV written below, in order. */
	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* cmd */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* bitmap: outer array TLV wrapping a single bitmap-pattern TLV,
	 * hence the outer len of sizeof(*tlv) + sizeof(*bitmap).
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));

	ptr += sizeof(*tlv);
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
	tlv->len = __cpu_to_le16(sizeof(*bitmap));
	bitmap = (void *)tlv->value;

	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
	bitmap->pattern_len = __cpu_to_le32(pattern_len);
	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
	bitmap->pattern_id = __cpu_to_le32(pattern_id);

	ptr += sizeof(*tlv);
	ptr += sizeof(*bitmap);

	/* ipv4 sync (unused, zero-length array) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ipv6 sync (unused, zero-length array) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* magic (unused, zero-length array) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* pattern info timeout (unused, zero-length array) */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ratelimit interval; the value itself is left at the zeroed
	 * skb contents (i.e. 0).
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(sizeof(u32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);
	return skb;
}
  3193. static struct sk_buff *
  3194. ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
  3195. u32 pattern_id)
  3196. {
  3197. struct wmi_tlv_wow_del_pattern_cmd *cmd;
  3198. struct wmi_tlv *tlv;
  3199. struct sk_buff *skb;
  3200. size_t len;
  3201. len = sizeof(*tlv) + sizeof(*cmd);
  3202. skb = ath10k_wmi_alloc_skb(ar, len);
  3203. if (!skb)
  3204. return ERR_PTR(-ENOMEM);
  3205. tlv = (struct wmi_tlv *)skb->data;
  3206. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
  3207. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3208. cmd = (void *)tlv->value;
  3209. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3210. cmd->pattern_id = __cpu_to_le32(pattern_id);
  3211. cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
  3212. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
  3213. vdev_id, pattern_id);
  3214. return skb;
  3215. }
/* Request FW to start PNO operation */
/* Build the WMI TLV NLO (network list offload / PNO) start command:
 * a config TLV, an array TLV of per-SSID nlo_configured_parameters,
 * and an array TLV of u32 channel numbers.
 *
 * SSID count is clamped to WMI_NLO_MAX_SSIDS and channel count to
 * WMI_NLO_MAX_CHAN; channels are taken from a_networks[0] only.
 *
 * Returns a ready-to-send skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
				       u32 vdev_id,
				       struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *channel_list;
	u16 tlv_len;
	size_t len;
	void *ptr;
	u32 i;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
				   WMI_NLO_MAX_CHAN);
	len += sizeof(struct nlo_configured_parameters) *
	       min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* wmi_tlv_wow_nlo_config_cmd parameters*/
	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	/* copy scan interval */
	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);

	if (pno->enable_pno_scan_randomization) {
		/* Randomized probe requests: firmware spoofs the source
		 * MAC within the given address/mask.
		 */
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
				WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
					       WMI_NLO_MAX_SSIDS));
	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
		  sizeof(struct nlo_configured_parameters);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(tlv_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
		/* Each entry carries its own inner TLV header that does
		 * not count itself in the length.
		 */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
					 sizeof(*tlv));

		/* copy ssid and it's length */
		nlo_list[i].ssid.valid = __cpu_to_le32(true);
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* copy rssi threshold */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
			nlo_list[i].rssi_cond.rssi =
				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);

	/* copy channel info */
	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
						   pno->a_networks[0].channel_count,
						   WMI_NLO_MAX_CHAN));

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
				 sizeof(u_int32_t));
	ptr += sizeof(*tlv);

	channel_list = (__le32 *)ptr;
	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
  3320. /* Request FW to stop ongoing PNO operation */
  3321. static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
  3322. u32 vdev_id)
  3323. {
  3324. struct wmi_tlv_wow_nlo_config_cmd *cmd;
  3325. struct wmi_tlv *tlv;
  3326. struct sk_buff *skb;
  3327. void *ptr;
  3328. size_t len;
  3329. len = sizeof(*tlv) + sizeof(*cmd) +
  3330. sizeof(*tlv) +
  3331. /* TLV place holder for array of structures
  3332. * nlo_configured_parameters(nlo_list)
  3333. */
  3334. sizeof(*tlv);
  3335. /* TLV place holder for array of uint32 channel_list */
  3336. skb = ath10k_wmi_alloc_skb(ar, len);
  3337. if (!skb)
  3338. return ERR_PTR(-ENOMEM);
  3339. ptr = (void *)skb->data;
  3340. tlv = ptr;
  3341. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
  3342. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3343. cmd = (void *)tlv->value;
  3344. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3345. cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
  3346. ptr += sizeof(*tlv);
  3347. ptr += sizeof(*cmd);
  3348. /* nlo_configured_parameters(nlo_list) */
  3349. tlv = ptr;
  3350. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
  3351. tlv->len = __cpu_to_le16(0);
  3352. ptr += sizeof(*tlv);
  3353. /* channel list */
  3354. tlv = ptr;
  3355. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
  3356. tlv->len = __cpu_to_le16(0);
  3357. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
  3358. return skb;
  3359. }
  3360. static struct sk_buff *
  3361. ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
  3362. struct wmi_pno_scan_req *pno_scan)
  3363. {
  3364. if (pno_scan->enable)
  3365. return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
  3366. else
  3367. return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
  3368. }
  3369. static struct sk_buff *
  3370. ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
  3371. {
  3372. struct wmi_tlv_adaptive_qcs *cmd;
  3373. struct wmi_tlv *tlv;
  3374. struct sk_buff *skb;
  3375. void *ptr;
  3376. size_t len;
  3377. len = sizeof(*tlv) + sizeof(*cmd);
  3378. skb = ath10k_wmi_alloc_skb(ar, len);
  3379. if (!skb)
  3380. return ERR_PTR(-ENOMEM);
  3381. ptr = (void *)skb->data;
  3382. tlv = ptr;
  3383. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
  3384. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3385. cmd = (void *)tlv->value;
  3386. cmd->enable = __cpu_to_le32(enable ? 1 : 0);
  3387. ptr += sizeof(*tlv);
  3388. ptr += sizeof(*cmd);
  3389. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
  3390. return skb;
  3391. }
  3392. static struct sk_buff *
  3393. ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
  3394. {
  3395. struct wmi_echo_cmd *cmd;
  3396. struct wmi_tlv *tlv;
  3397. struct sk_buff *skb;
  3398. void *ptr;
  3399. size_t len;
  3400. len = sizeof(*tlv) + sizeof(*cmd);
  3401. skb = ath10k_wmi_alloc_skb(ar, len);
  3402. if (!skb)
  3403. return ERR_PTR(-ENOMEM);
  3404. ptr = (void *)skb->data;
  3405. tlv = ptr;
  3406. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
  3407. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3408. cmd = (void *)tlv->value;
  3409. cmd->value = cpu_to_le32(value);
  3410. ptr += sizeof(*tlv);
  3411. ptr += sizeof(*cmd);
  3412. ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
  3413. return skb;
  3414. }
  3415. static struct sk_buff *
  3416. ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
  3417. const struct wmi_vdev_spectral_conf_arg *arg)
  3418. {
  3419. struct wmi_vdev_spectral_conf_cmd *cmd;
  3420. struct sk_buff *skb;
  3421. struct wmi_tlv *tlv;
  3422. void *ptr;
  3423. size_t len;
  3424. len = sizeof(*tlv) + sizeof(*cmd);
  3425. skb = ath10k_wmi_alloc_skb(ar, len);
  3426. if (!skb)
  3427. return ERR_PTR(-ENOMEM);
  3428. ptr = (void *)skb->data;
  3429. tlv = ptr;
  3430. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
  3431. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3432. cmd = (void *)tlv->value;
  3433. cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
  3434. cmd->scan_count = __cpu_to_le32(arg->scan_count);
  3435. cmd->scan_period = __cpu_to_le32(arg->scan_period);
  3436. cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
  3437. cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
  3438. cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
  3439. cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
  3440. cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
  3441. cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
  3442. cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
  3443. cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
  3444. cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
  3445. cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
  3446. cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
  3447. cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
  3448. cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
  3449. cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
  3450. cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
  3451. cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
  3452. return skb;
  3453. }
  3454. static struct sk_buff *
  3455. ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
  3456. u32 trigger, u32 enable)
  3457. {
  3458. struct wmi_vdev_spectral_enable_cmd *cmd;
  3459. struct sk_buff *skb;
  3460. struct wmi_tlv *tlv;
  3461. void *ptr;
  3462. size_t len;
  3463. len = sizeof(*tlv) + sizeof(*cmd);
  3464. skb = ath10k_wmi_alloc_skb(ar, len);
  3465. if (!skb)
  3466. return ERR_PTR(-ENOMEM);
  3467. ptr = (void *)skb->data;
  3468. tlv = ptr;
  3469. tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
  3470. tlv->len = __cpu_to_le16(sizeof(*cmd));
  3471. cmd = (void *)tlv->value;
  3472. cmd->vdev_id = __cpu_to_le32(vdev_id);
  3473. cmd->trigger_cmd = __cpu_to_le32(trigger);
  3474. cmd->enable_cmd = __cpu_to_le32(enable);
  3475. return skb;
  3476. }
  3477. /****************/
  3478. /* TLV mappings */
  3479. /****************/
/* Mapping from the driver's abstract WMI command IDs to the command IDs
 * used by the TLV firmware interface. Entries set to WMI_CMD_UNSUPPORTED
 * are commands this firmware variant does not implement.
 */
static struct wmi_cmd_map wmi_tlv_cmd_map = {
	.init_cmdid = WMI_TLV_INIT_CMDID,
	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
				WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
	.echo_cmdid = WMI_TLV_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
	/* Commands below are not available on the TLV firmware interface. */
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};
/*
 * Map of abstract ath10k pdev parameter IDs to the numeric parameter IDs
 * understood by TLV-based firmware.  Entries set to one of the
 * *_PARAM_UNSUPPORTED macros denote parameters the TLV firmware does not
 * implement.
 */
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
	.ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
	.ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
	.ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
	.ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
	.ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
	.l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
	.dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
	.pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
	/* NOTE(review): watermark maps to the same firmware ID as tmo_en
	 * (..._TXBUF_TMO_EN); confirm against the TLV firmware enum whether a
	 * distinct WATERMARK ID exists or this aliasing is intentional.
	 */
	.pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
	.pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
	.pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
	.vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
	.peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
	.bcnflt_stats_update_period =
				WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
	.pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
	.arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
	.dcs = WMI_TLV_PDEV_PARAM_DCS,
	.ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
	.ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
	.ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
	.ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
	.ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
	.dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
	.proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
	.idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
	.power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
	/* NOTE(review): both WMI_TLV_PDEV_PARAM_UNSUPPORTED and
	 * WMI_PDEV_PARAM_UNSUPPORTED are used below for "not implemented";
	 * presumably both expand to the same sentinel value — verify in
	 * wmi.h/wmi-tlv.h before relying on either.
	 */
	.fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
	.rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
	.rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
	.peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
};
/*
 * Map of abstract ath10k per-peer parameter IDs to the numeric IDs used by
 * TLV-based firmware.  Every abstract peer parameter has a TLV counterpart
 * here (none are marked unsupported).
 */
static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
	.smps_state = WMI_TLV_PEER_SMPS_STATE,
	.ampdu = WMI_TLV_PEER_AMPDU,
	.authorize = WMI_TLV_PEER_AUTHORIZE,
	.chan_width = WMI_TLV_PEER_CHAN_WIDTH,
	.nss = WMI_TLV_PEER_NSS,
	.use_4addr = WMI_TLV_PEER_USE_4ADDR,
	.membership = WMI_TLV_PEER_MEMBERSHIP,
	.user_pos = WMI_TLV_PEER_USERPOS,
	.crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
	.tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
	.set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
	.ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
	.phymode = WMI_TLV_PEER_PHYMODE,
	.use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
	.dummy_var = WMI_TLV_PEER_DUMMY_VAR,
};
/*
 * Map of abstract ath10k vdev parameter IDs to the numeric IDs used by
 * TLV-based firmware.  Entries set to one of the *_PARAM_UNSUPPORTED
 * macros denote vdev parameters the TLV firmware does not implement.
 */
static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
	.rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
	.listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
	.multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
	.mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
	.slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
	.preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
	.swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
	.wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
	.wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
	.wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
	.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
	.wmi_vdev_oc_scheduler_air_time_limit =
				WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
	.wds = WMI_TLV_VDEV_PARAM_WDS,
	.atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
	.bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
	.bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
	.bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
	.feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
	.chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
	.chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
	.disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
	.sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
	.mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
	.protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
	.fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
	.sgi = WMI_TLV_VDEV_PARAM_SGI,
	.ldpc = WMI_TLV_VDEV_PARAM_LDPC,
	.tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
	.rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
	.intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
	.def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
	.nss = WMI_TLV_VDEV_PARAM_NSS,
	.bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
	.mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
	.mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
	.dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
	.unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
	.ap_keepalive_min_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_idle_inactive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
	.ap_keepalive_max_unresponsive_time_secs =
		WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
	.ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
	.mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
	.txbf = WMI_TLV_VDEV_PARAM_TXBF,
	.packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
	.drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
	/* NOTE(review): both WMI_TLV_VDEV_PARAM_UNSUPPORTED and
	 * WMI_VDEV_PARAM_UNSUPPORTED are used as "not implemented" sentinels
	 * in this table; presumably they share a value — verify in the
	 * headers before relying on either.
	 */
	.ap_detect_out_of_sync_sleeping_sta_time_secs =
					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
};
/*
 * WMI op table for TLV-based firmware.  Each member points at the TLV
 * implementation of the corresponding abstract WMI operation (event
 * parsing via the pull_* ops, command generation via the gen_* ops).
 * Ops with no TLV implementation are simply left unset (see the inline
 * comments below); callers elsewhere in the driver are expected to cope
 * with a NULL member.
 */
static const struct wmi_ops wmi_tlv_ops = {
	.rx = ath10k_wmi_tlv_op_rx,
	.map_svc = wmi_tlv_svc_map,
	.map_svc_ext = wmi_tlv_svc_map_ext,

	/* Event parsers (firmware -> host) */
	.pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
	.pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
	.pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
	.pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
	.pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
	/* Shared (non-TLV) phyerr body parser is reused here. */
	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
	.pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
	.pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,

	/* Command generators (host -> firmware) */
	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
	.gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
	.gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
	.gen_init = ath10k_wmi_tlv_op_gen_init,
	.gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
	.gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
	.gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
	.gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
	.gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
	.gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
	.gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
	.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
	.gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
	.gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
	.gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
	.gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
	.gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
	.gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
	.gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
	.gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
	.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
	.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
	.gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
	.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
	/* .gen_mgmt_tx = not implemented; HTT is used */
	.gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
	.cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
	.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
	.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	.gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
	.gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
	/* .gen_addba_clear_resp not implemented */
	/* .gen_addba_send not implemented */
	/* .gen_addba_set_resp not implemented */
	/* .gen_delba_send not implemented */
	.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
	.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
	.gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
	.gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
	.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
	.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
	.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
	.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
	.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
	.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
	.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
	/* Stats formatting reuses the "main" (non-TLV) implementation. */
	.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
	.get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
	.gen_echo = ath10k_wmi_tlv_op_gen_echo,
	.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
	.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
};
/*
 * Map of abstract peer-assoc flag bits to the bit values used by
 * TLV-based firmware.
 */
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
	.auth = WMI_TLV_PEER_AUTH,
	.qos = WMI_TLV_PEER_QOS,
	.need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
	.need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
	.apsd = WMI_TLV_PEER_APSD,
	.ht = WMI_TLV_PEER_HT,
	.bw40 = WMI_TLV_PEER_40MHZ,
	.stbc = WMI_TLV_PEER_STBC,
	/* NOTE(review): the map field is spelled "ldbc" but carries the LDPC
	 * flag value; the field name looks like a long-standing typo in the
	 * shared struct — confirm before renaming anywhere.
	 */
	.ldbc = WMI_TLV_PEER_LDPC,
	.dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
	.static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
	.spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
	.vht = WMI_TLV_PEER_VHT,
	.bw80 = WMI_TLV_PEER_80MHZ,
	.pmf = WMI_TLV_PEER_PMF,
	.bw160 = WMI_TLV_PEER_160MHZ,
};
  3929. /************/
  3930. /* TLV init */
  3931. /************/
  3932. void ath10k_wmi_tlv_attach(struct ath10k *ar)
  3933. {
  3934. ar->wmi.cmd = &wmi_tlv_cmd_map;
  3935. ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
  3936. ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
  3937. ar->wmi.peer_param = &wmi_tlv_peer_param_map;
  3938. ar->wmi.ops = &wmi_tlv_ops;
  3939. ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
  3940. }