fw.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
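
/* Allocate an skb for an H2C (host-to-card) command. Room is reserved in
 * front of the payload for the TX descriptor and, optionally, the H2C
 * command header, so callers can push those later without reallocating.
 */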
static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

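/* Poll the WCPU firmware status until it reports "init ready", or translate
 * the reported failure code (checksum, security, or cut-version mismatch)
 * into an errno. On success the FW_RDY flag is set for the rest of the
 * driver.
 */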
#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;
		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;
		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;
		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

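/* Walk the firmware image header: record each section's length, download
 * address and re-download flag, and point section_info->addr at the section
 * payload. The accumulated section lengths must end exactly at the end of
 * the image, otherwise the binary is rejected.
 */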
static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = GET_FW_HDR_SEC_NUM(fw);
	info->hdr_len = RTW89_FW_HDR_SIZE +
			info->section_num * RTW89_FW_SECTION_HDR_SIZE;
	bin = fw + info->hdr_len;

	/* jump to section header */
	fw += RTW89_FW_HDR_SIZE;
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
		if (GET_FWSECTION_HDR_CHECKSUM(fw))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
		section_info->dladdr =
				GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		fw += RTW89_FW_SECTION_HDR_SIZE;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

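/* Select a firmware suit from the multi-firmware (MFW) container: match the
 * chip cut version and requested type, skipping MP images. A blob without
 * the MFW signature is treated as a single legacy (normal-type) firmware.
 */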
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u8 *mfw = fw_info->firmware->data;
	u32 mfw_len = fw_info->firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->cv != rtwdev->hal.cv ||
		    mfw_info->type != type ||
		    mfw_info->mp)
			continue;

		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
		fw_suit->size = le32_to_cpu(mfw_info->size);
		return 0;
	}

	rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;
}

static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
				enum rtw89_fw_type type,
				struct rtw89_fw_suit *fw_suit)
{
	const u8 *hdr = fw_suit->data;

	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
	if (ret)
		return ret;

	rtw89_fw_update_ver(rtwdev, type, fw_suit);

	return 0;
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
};

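/* Compare the loaded firmware's version code against the per-chip entries in
 * fw_feat_tbl and record the matching feature bits via RTW89_SET_FW_FEATURE(),
 * so the rest of the driver can key optional behavior off them.
 */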
static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct __fw_feat_cfg *ent;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;
	int i;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		ent = &fw_feat_tbl[i];
		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(suit_ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
	}
}

void rtw89_early_fw_feature_recognize(struct device *device,
				      const struct rtw89_chip_info *chip,
				      u32 *early_feat_map)
{
	union {
		struct rtw89_mfw_hdr mfw_hdr;
		u8 fw_hdr[RTW89_FW_HDR_SIZE];
	} buf = {};
	const struct firmware *firmware;
	u32 ver_code;
	int ret;
	int i;

	ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
						device, &buf, sizeof(buf), 0);
	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return;
	}

	ver_code = buf.mfw_hdr.sig != RTW89_MFW_SIG ?
		   RTW89_FW_HDR_VER_CODE(&buf.fw_hdr) :
		   RTW89_MFW_HDR_VER_CODE(&buf.mfw_hdr);
	if (!ver_code)
		goto out;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			*early_feat_map |= BIT(ent->feature);
	}

out:
	release_firmware(firmware);
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
	if (ret)
		return ret;

	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);

	rtw89_fw_recognize_features(rtwdev);

	return 0;
}

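/* Prepend the 8-byte H2C command header (category/class/function, sequence
 * number, total length, ack flags). A "receive ack" is forced on every
 * fourth command so lost H2Cs can be detected against the sequence counter.
 */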
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

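/* Send the firmware header (main header plus all section headers) to the
 * WCPU as a single FWDL H2C, advertising the per-packet payload size used
 * later for the section download.
 */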
static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

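/* Stream one firmware section to the WCPU, splitting it into
 * FWDL_SECTION_PER_PKT_LEN sized H2C packets without command headers.
 */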
static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

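/* Full firmware download flow: parse the image, wait for the H2C path to be
 * ready, push the header, then stream every section and wait for the WCPU to
 * report ready. On any failure the FWDL status registers and the firmware
 * program counter are dumped to aid debugging.
 */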
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	struct rtw89_fw_bin_info info;
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;
	u8 val;
	int ret;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
		return -ENOENT;
	}

	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		goto fwdl_err;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		goto fwdl_err;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	ret = rtw89_fw_download_main(rtwdev, fw, &info);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->completion);
	if (!fw->firmware)
		return -EINVAL;

	return 0;
}

static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
{
	struct rtw89_fw_info *fw = context;
	struct rtw89_dev *rtwdev = fw->rtwdev;

	if (!firmware || !firmware->data) {
		rtw89_err(rtwdev, "failed to request firmware\n");
		complete_all(&fw->completion);
		return;
	}

	fw->firmware = firmware;
	complete_all(&fw->completion);
}

int rtw89_load_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;
	const char *fw_name = rtwdev->chip->fw_name;
	int ret;

	fw->rtwdev = rtwdev;
	init_completion(&fw->completion);

	ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
				      GFP_KERNEL, fw, rtw89_load_firmware_cb);
	if (ret) {
		rtw89_err(rtwdev, "failed to async firmware request\n");
		return ret;
	}

	return 0;
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	rtw89_wait_firmware_completion(rtwdev);

	if (fw->firmware)
		release_firmware(fw->firmware);
}

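/* The remaining functions each build a fixed-size H2C command: allocate an
 * skb with header room, fill the payload with the SET_xxx field helpers,
 * attach the command header and send it through rtw89_h2c_tx(). This one
 * updates the address CAM and BSSID CAM for a vif/sta pair.
 */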
#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

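/* Program (or free) a block-ack CAM entry for a TID. Failing to acquire a
 * static entry is not fatal, since the hardware can still fall back to
 * dynamically created BA CAM entries.
 */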
#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_v1)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_v1) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
					       u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

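/* Report the template packet IDs (probe response, PS-Poll, NULL, QoS NULL,
 * CTS-to-self) associated with this macid; all of them are initialized to
 * the "undefined" ID (0xff) here.
 */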
#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

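/* Fill the TX path fields of a CMAC control table from the configured TX
 * antenna setting: the enabled path mask (defaulting to RF_B when none is
 * set) and the per-path map, where path B is only mapped when both antennas
 * are in use.
 */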
static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
	u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

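/* Derive the nominal packet padding per bandwidth from the peer's HE PPE
 * thresholds. If the PPE threshold field is absent, the nominal padding
 * advertised in PHY capability byte 9 is used for every bandwidth instead.
 */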
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->deflink.he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}

int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (rtwsta->cctl_tx_time) {
		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
	}
	if (rtwsta->cctl_tx_retry_limit) {
		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

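/* Build a beacon-update H2C: a fixed parameter block (port, band, TIM
 * offset, rate, HW sequence-number selection) followed by the raw beacon
 * frame obtained from mac80211.
 */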
#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	struct sk_buff *skb_beacon;
	u16 tim_offset;
	int bcn_total_len;
	u16 beacon_rate;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, H2C_BCN_BASE_LEN);

	SET_BCN_UPD_PORT(skb->data, rtwvif->port);
	SET_BCN_UPD_MBSSID(skb->data, 0);
	SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
	SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
	SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
	SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
	SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
	SET_BCN_UPD_RATE(skb->data, beacon_rate);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif,
			       struct rtw89_sta *rtwsta,
			       enum rtw89_upd_mode upd_mode)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role;
	int ret;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
		if (rtwsta)
			self_role = RTW89_SELF_ROLE_AP_CLIENT;
		else
			self_role = rtwvif->self_role;
	} else {
		self_role = rtwvif->self_role;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
			      H2C_ROLE_MAINTAIN_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   struct rtw89_sta *rtwsta, bool dis_conn)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role = rtwvif->self_role;
	u8 net_type = rtwvif->net_type;
	int ret;

	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
		self_role = RTW89_SELF_ROLE_AP_CLIENT;
		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_JOIN_INFO_LEN);
	SET_JOININFO_MACID(skb->data, mac_id);
	SET_JOININFO_OP(skb->data, dis_conn);
	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
	SET_JOININFO_ISHESTA(skb->data, 0);
	SET_JOININFO_DLBW(skb->data, 0);
	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
	SET_JOININFO_DL_T_PE(skb->data, 0);
	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
	SET_JOININFO_NET_TYPE(skb->data, net_type);
	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
	SET_JOININFO_SELF_ROLE(skb->data, self_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      H2C_JOIN_INFO_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_grp h2c = {{0}};
	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
	if (pause)
		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
	skb_put_data(skb, &h2c, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_EDCA_LEN 12
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

  1251. #define H2C_TSF32_TOGL_LEN 4
  1252. int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
  1253. bool en)
  1254. {
  1255. struct sk_buff *skb;
  1256. u16 early_us = en ? 2000 : 0;
  1257. u8 *cmd;
  1258. int ret;
  1259. skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
  1260. if (!skb) {
  1261. rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
  1262. return -ENOMEM;
  1263. }
  1264. skb_put(skb, H2C_TSF32_TOGL_LEN);
  1265. cmd = skb->data;
  1266. RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
  1267. RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
  1268. RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
  1269. RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
  1270. rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
  1271. H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
  1272. H2C_FUNC_TSF32_TOGL, 0, 0,
  1273. H2C_TSF32_TOGL_LEN);
  1274. ret = rtw89_h2c_tx(rtwdev, skb, false);
  1275. if (ret) {
  1276. rtw89_err(rtwdev, "failed to send h2c\n");
  1277. goto fail;
  1278. }
  1279. return 0;
  1280. fail:
  1281. dev_kfree_skb_any(skb);
  1282. return ret;
  1283. }
#define H2C_OFLD_CFG_LEN 8
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
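
/*
 * Download a rate adaptation (RA) configuration for one MAC ID. The 40-bit
 * rate mask is split into five byte-wide fields; when @csi is set, the extra
 * beamformee CSI rate controls are filled in as well.
 */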
#define H2C_RA_LEN 16
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
{
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_RA_LEN);
	cmd = skb->data;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra cmd msk: %llx ", ra->ra_mask);

	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
	RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
	RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);

	if (csi) {
		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
			      H2C_RA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
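
/*
 * BT-coexistence driver info, INIT block: reports antenna layout, module
 * info and initial WLAN state to the coex part of the firmware via
 * SET_DRV_INFO.
 */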
#define H2C_LEN_CXDRVHDR 2
#define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_dm *dm = &btc->dm;
	struct rtw89_btc_init_info *init_info = &dm->init_info;
	struct rtw89_btc_module *module = &init_info->module;
	struct rtw89_btc_ant_info *ant = &module->ant;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
	RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
	RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
	RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
	RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);

	RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
	RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
	RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);

	RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
	RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
	RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
	RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
	RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
	RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_INIT);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
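
/*
 * BT-coexistence driver info, ROLE block: a bitmap of the interface roles in
 * use plus one fixed-size record per port (12 bytes per port here, 16 plus a
 * DBCC tail in the v1 layout) describing band, channel, bandwidth and TX/RX
 * activity of each active role.
 */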
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
#define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
				   H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
				   H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role *active = role_info->active_role;
	struct sk_buff *skb;
	u8 offset = 0;
	u8 *cmd;
	int ret;
	int i;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_ROLE);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
	struct sk_buff *skb;
	u8 *cmd, offset;
	int ret;
	int i;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	offset = PORT_DATA_OFFSET;
	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
	}

	offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_ROLE_V1);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
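
/*
 * BT-coexistence driver info, CTRL block: manual mode, ignore-BT and
 * always-freerun flags; the trace step field is only sent for RTL8852A.
 */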
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
	if (chip->chip_id == RTL8852A)
		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_CTRL);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_RFK);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
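
/* Ask firmware to drop a previously offloaded packet template by its id. */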
#define H2C_LEN_PKT_OFLD 4
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
{
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_PKT_OFLD);
	cmd = skb->data;

	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PACKET_OFLD, 1, 1,
			      H2C_LEN_PKT_OFLD);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
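
/*
 * Offload a packet template (e.g. a probe request) to firmware. A free slot
 * is reserved in the pkt_offload bitmap and returned through @id; @skb_ofld
 * stays owned by the caller, only its payload is copied into the H2C.
 */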
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
				 struct sk_buff *skb_ofld)
{
	struct sk_buff *skb;
	u8 *cmd;
	u8 alloc_id;
	int ret;

	alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
					      RTW89_MAX_PKT_OFLD_NUM);
	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
		return -ENOSPC;

	*id = alloc_id;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
		/* give back the offload id reserved above */
		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_PKT_OFLD);
	cmd = skb->data;

	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
	skb_put_data(skb, skb_ofld->data, skb_ofld->len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PACKET_OFLD, 1, 1,
			      H2C_LEN_PKT_OFLD + skb_ofld->len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
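
/*
 * Download the scan channel list: @len entries from @chan_list, each
 * serialized into a fixed RTW89_MAC_CHINFO_SIZE record after a small header
 * carrying the entry count and per-entry size (in 4-byte units).
 */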
#define H2C_LEN_SCAN_LIST_OFFLOAD 4
int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
				   struct list_head *chan_list)
{
	struct rtw89_mac_chinfo *ch_info;
	struct sk_buff *skb;
	int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
	cmd = skb->data;

	RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
	/* in unit of 4 bytes */
	RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);

	list_for_each_entry(ch_info, chan_list, list) {
		cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);

		RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
		RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
		RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
		RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
		RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
		RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
		RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
		RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
		RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
		RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
		RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
		RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
		RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
		RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
		RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
		RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
		RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
		RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
		RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
		RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
		RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
		RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
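
/*
 * Start or stop the firmware-offloaded scan itself. When
 * @option->target_ch_mode is set, the operating channel previously saved by
 * rtw89_store_op_chan() is included in the command.
 */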
#define H2C_LEN_SCAN_OFFLOAD 28
int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
			      struct rtw89_scan_option *option,
			      struct rtw89_vif *rtwvif)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
	cmd = skb->data;

	RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
	RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
	RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
	RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
	RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
	RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
	RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
	if (option->target_ch_mode) {
		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
		RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
						       scan_info->op_pri_ch);
		RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
							   scan_info->op_chan);
		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
							scan_info->op_band);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_SCANOFLD, 1, 1,
			      H2C_LEN_SCAN_OFFLOAD);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
			struct rtw89_fw_h2c_rf_reg_info *info,
			u16 len, u8 page)
{
	struct sk_buff *skb;
	u8 class = info->rf_path == RF_PATH_A ?
		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
		return -ENOMEM;
	}
	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, class, page, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_mcc_info *mcc_info = &rtwdev->mcc;
	struct rtw89_fw_h2c_rf_get_mccch *mccch;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc\n");
		return -ENOMEM;
	}
	skb_put(skb, sizeof(*mccch));
	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;

	mccch->ch_0 = cpu_to_le32(mcc_info->ch[0]);
	mccch->ch_1 = cpu_to_le32(mcc_info->ch[1]);
	mccch->band_0 = cpu_to_le32(mcc_info->band[0]);
	mccch->band_1 = cpu_to_le32(mcc_info->band[1]);
	mccch->current_channel = cpu_to_le32(chan->channel);
	mccch->current_band_type = cpu_to_le32(chan->band_type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
			      sizeof(*mccch));

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);

int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
			      bool rack, bool dack)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c;

	lockdep_assert_held(&rtwdev->mutex);

	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
	}
}

void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c, *tmp;

	mutex_lock(&rtwdev->mutex);
	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
		list_del(&early_h2c->list);
		kfree(early_h2c->h2c);
		kfree(early_h2c);
	}
	mutex_unlock(&rtwdev->mutex);
}

void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
	skb_queue_tail(&rtwdev->c2h_queue, c2h);
	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
}

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb)
{
	u8 category = RTW89_GET_C2H_CATEGORY(skb->data);
	u8 class = RTW89_GET_C2H_CLASS(skb->data);
	u8 func = RTW89_GET_C2H_FUNC(skb->data);
	u16 len = RTW89_GET_C2H_LEN(skb->data);
	bool dump = true;

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	switch (category) {
	case RTW89_C2H_CAT_TEST:
		break;
	case RTW89_C2H_CAT_MAC:
		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
		if (class == RTW89_MAC_C2H_CLASS_INFO &&
		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
			dump = false;
		break;
	case RTW89_C2H_CAT_OUTSRC:
		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
		else
			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
		break;
	}

	if (dump)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}

void rtw89_fw_c2h_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						c2h_work);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
		skb_unlink(skb, &rtwdev->c2h_queue);
		mutex_lock(&rtwdev->mutex);
		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
		mutex_unlock(&rtwdev->mutex);
		dev_kfree_skb_any(skb);
	}
}
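
/*
 * Register-based H2C path: poll until firmware has consumed the previous
 * message, write the header and payload words into chip->h2c_regs, then kick
 * B_AX_H2CREG_TRIGGER.
 */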
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
				  struct rtw89_mac_h2c_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *h2c_reg = chip->h2c_regs;
	u8 i, val, len;
	int ret;

	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
				rtwdev, chip->h2c_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
		return ret;
	}

	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
			   sizeof(info->h2creg[0]));

	RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
	RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
	for (i = 0; i < RTW89_H2CREG_MAX; i++)
		rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);

	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);

	return 0;
}

static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_c2h_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *c2h_reg = chip->c2h_regs;
	u32 ret;
	u8 i, val;

	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
				       RTW89_C2H_TIMEOUT, false, rtwdev,
				       chip->c2h_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "c2h reg timeout\n");
		return ret;
	}

	for (i = 0; i < RTW89_C2HREG_MAX; i++)
		info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);

	info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
	info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
			    RTW89_C2HREG_HDR_LEN;

	return 0;
}

int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
		     struct rtw89_mac_h2c_info *h2c_info,
		     struct rtw89_mac_c2h_info *c2h_info)
{
	u32 ret;

	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
		lockdep_assert_held(&rtwdev->mutex);

	if (!h2c_info && !c2h_info)
		return -EINVAL;

	if (!h2c_info)
		goto recv_c2h;

	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
	if (ret)
		return ret;

recv_c2h:
	if (!c2h_info)
		return 0;

	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
	if (ret)
		return ret;

	return 0;
}

void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_err(rtwdev, "[ERR]pwr is off\n");
		return;
	}

	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
{
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct rtw89_pktofld_info *info, *tmp;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(rtwdev->chip->support_bands & BIT(idx)))
			continue;

		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
			rtw89_core_release_bit_map(rtwdev->pkt_offload,
						   info->id);
			list_del(&info->list);
			kfree(info);
		}
	}
}
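
/*
 * For every supported band, build a copy of the probe request with that
 * band's scan IEs appended and offload it to firmware, tracking the returned
 * packet id in scan_info->pkt_list[band].
 */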
static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif,
				     struct sk_buff *skb)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct rtw89_pktofld_info *info;
	struct sk_buff *new;
	int ret = 0;
	u8 band;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;

		new = skb_copy(skb, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(new, ies->ies[band], ies->len[band]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(new);
			goto out;
		}

		list_add_tail(&info->list, &scan_info->pkt_list[band]);
		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
		if (ret) {
			kfree_skb(new);
			goto out;
		}

		kfree_skb(new);
	}
out:
	return ret;
}

static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
					  struct rtw89_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff *skb;
	u8 num = req->n_ssids, i;
	int ret;

	for (i = 0; i < num; i++) {
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb)
			return -ENOMEM;

		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb);
		kfree_skb(skb);

		if (ret)
			return ret;
	}

	return 0;
}

static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
				   int ssid_num,
				   struct rtw89_mac_chinfo *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;

	if (ssid_num) {
		ch_info->num_pkt = ssid_num;
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			ch_info->probe_id = info->id;
			ch_info->pkt_id[probe_count] = info->id;
			if (++probe_count >= ssid_num)
				break;
		}
		if (probe_count != ssid_num)
			rtw89_err(rtwdev, "SSID num differs from list len\n");
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
		ch_info->central_ch = scan_info->op_chan;
		ch_info->pri_ch = scan_info->op_pri_ch;
		ch_info->ch_band = scan_info->op_band;
		ch_info->bw = scan_info->op_bw;
		ch_info->tx_null = true;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		ch_info->period = max_t(u8, ch_info->period,
					RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}
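
/*
 * Convert the cfg80211 channel list into firmware channel records, starting
 * at last_chan_idx and limited to RTW89_SCAN_LIST_LIMIT entries per call.
 * For a connected vif, an operating-channel entry is interleaved whenever
 * the accumulated off-channel time would exceed RTW89_OFF_CHAN_TIME.
 */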
static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
				       struct rtw89_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	int list_len, off_chan_time = 0;
	enum rtw89_chan_type type;
	int ret = 0;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);
	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
	     idx++, list_len++) {
		channel = req->channels[idx];
		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		ch_info->period = req->duration_mandatory ?
				  req->duration : RTW89_CHANNEL_TIME;
		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;

		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);

		if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				ret = -ENOMEM;
				kfree(ch_info);
				goto out;
			}

			type = RTW89_CHAN_OPERATE;
			tmp->period = req->duration_mandatory ?
				      req->duration : RTW89_CHANNEL_TIME;
			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
			list_add_tail(&tmp->list, &chan_list);
			off_chan_time = 0;
			list_len++;
		}
		list_add_tail(&ch_info->list, &chan_list);
		off_chan_time += ch_info->period;
	}
	rtwdev->scan_info.last_chan_idx = idx;
	ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
				   struct rtw89_vif *rtwvif)
{
	int ret;

	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
	if (ret) {
		rtw89_err(rtwdev, "Update probe request failed\n");
		goto out;
	}
	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
out:
	return ret;
}

void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_scan_request *scan_req)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u32 rx_fltr = rtwdev->hal.rx_fltr;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwdev->scan_info.last_chan_idx = 0;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;
	ieee80211_stop_queues(rtwdev->hw);

	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);
	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;
	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rx_fltr);
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			    bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw89_vif *rtwvif;

	if (!vif)
		return;

	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, vif, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);

	rtw89_release_pkt_list(rtwdev);
	rtwvif = (struct rtw89_vif *)vif->drv_priv;
	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.last_chan_idx = 0;
	rtwdev->scan_info.scanning_vif = NULL;

	if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
		rtw89_store_op_chan(rtwdev, false);
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
	rtw89_hw_scan_offload(rtwdev, vif, false);
	rtw89_hw_scan_complete(rtwdev, vif, true);
}

int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			  bool enable)
{
	struct rtw89_scan_option opt = {0};
	struct rtw89_vif *rtwvif;
	int ret = 0;

	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
	if (!rtwvif)
		return -EINVAL;

	opt.enable = enable;
	opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
	if (enable) {
		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
		if (ret)
			goto out;
	}
	ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
out:
	return ret;
}
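
/*
 * Save the current operating channel before scanning (@backup == true) or
 * restore it as the active entity channel afterwards (@backup == false).
 */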
void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct rtw89_chan new;

	if (backup) {
		scan_info->op_pri_ch = cur->primary_channel;
		scan_info->op_chan = cur->channel;
		scan_info->op_bw = cur->band_width;
		scan_info->op_band = cur->band_type;
	} else {
		rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
				  scan_info->op_band, scan_info->op_bw);
		rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
	}
}

#define H2C_FW_CPU_EXCEPTION_LEN 4
#define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for fw cpu exception\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_TEST,
			      H2C_CL_FW_STATUS_TEST,
			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
			      H2C_FW_CPU_EXCEPTION_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_PKT_DROP_LEN 24
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
			  const struct rtw89_pkt_drop_params *params)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for packet drop\n");
		return -ENOMEM;
	}

	switch (params->sel) {
	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "H2C of pkt drop might not fully support sel: %d yet\n",
			    params->sel);
		break;
	}

	skb_put(skb, H2C_PKT_DROP_LEN);
	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PKT_DROP, 0, 0,
			      H2C_PKT_DROP_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);
	return ret;
}