// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/iopoll.h>

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"
#include "wow.h"
#include "ps.h"
#include "phy.h"
#include "mac.h"

static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 sub_cmd_id;

	c2h = get_c2h_from_skb(skb);
	sub_cmd_id = c2h->payload[0];

	switch (sub_cmd_id) {
	case C2H_CCX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_RPT);
		break;
	case C2H_SCAN_STATUS_RPT:
		rtw_hw_scan_status_report(rtwdev, skb);
		break;
	case C2H_CHAN_SWITCH:
		rtw_hw_scan_chan_switch(rtwdev, skb);
		break;
	default:
		break;
	}
}
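
/* Derive a max A-MSDU length from the current bit rate, which
 * cfg80211_calculate_bitrate() reports in units of 100 kbps. Returning 1
 * effectively disables aggregation at low legacy rates, while 0 leaves
 * the A-MSDU length unlimited at the highest rates.
 */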
static u16 get_max_amsdu_len(u32 bit_rate)
{
	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* lower than 20M 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	/* unlimited */
	return 0;
}
struct rtw_fw_iter_ra_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_fw_iter_ra_data *ra_data = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 mac_id, rate, sgi, bw;
	u8 mcs, nss;
	u32 bit_rate;

	mac_id = GET_RA_REPORT_MACID(ra_data->payload);
	if (si->mac_id != mac_id)
		return;

	si->ra_report.txrate.flags = 0;

	rate = GET_RA_REPORT_RATE(ra_data->payload);
	sgi = GET_RA_REPORT_SGI(ra_data->payload);
	bw = GET_RA_REPORT_BW(ra_data->payload);

	if (rate < DESC_RATEMCS0) {
		si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
		goto legacy;
	}

	rtw_desc_to_mcsrate(rate, &mcs, &nss);
	if (rate >= DESC_RATEVHT1SS_MCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
	else if (rate >= DESC_RATEMCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

	if (rate >= DESC_RATEMCS0) {
		si->ra_report.txrate.mcs = mcs;
		si->ra_report.txrate.nss = nss;
	}

	if (sgi)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (bw == RTW_CHANNEL_WIDTH_80)
		si->ra_report.txrate.bw = RATE_INFO_BW_80;
	else if (bw == RTW_CHANNEL_WIDTH_40)
		si->ra_report.txrate.bw = RATE_INFO_BW_40;
	else
		si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
	bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

	si->ra_report.desc_rate = rate;
	si->ra_report.bit_rate = bit_rate;

	sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}

static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
				    u8 length)
{
	struct rtw_fw_iter_ra_data ra_data;

	if (WARN(length < 7, "invalid ra report c2h length\n"))
		return;

	rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
	ra_data.rtwdev = rtwdev;
	ra_data.payload = payload;
	rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}
struct rtw_beacon_filter_iter_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

static void rtw_fw_bcn_filter_notify_vif_iter(void *data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	struct rtw_beacon_filter_iter_data *iter_data = data;
	struct rtw_dev *rtwdev = iter_data->rtwdev;
	u8 *payload = iter_data->payload;
	u8 type = GET_BCN_FILTER_NOTIFY_TYPE(payload);
	u8 event = GET_BCN_FILTER_NOTIFY_EVENT(payload);
	s8 sig = (s8)GET_BCN_FILTER_NOTIFY_RSSI(payload);

	switch (type) {
	case BCN_FILTER_NOTIFY_SIGNAL_CHANGE:
		event = event ? NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH :
			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
		ieee80211_cqm_rssi_notify(vif, event, sig, GFP_KERNEL);
		break;
	case BCN_FILTER_CONNECTION_LOSS:
		ieee80211_connection_loss(vif);
		break;
	case BCN_FILTER_CONNECTED:
		rtwdev->beacon_loss = false;
		break;
	case BCN_FILTER_NOTIFY_BEACON_LOSS:
		rtwdev->beacon_loss = true;
		rtw_leave_lps(rtwdev);
		break;
	}
}

static void rtw_fw_bcn_filter_notify(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_beacon_filter_iter_data dev_iter_data;

	dev_iter_data.rtwdev = rtwdev;
	dev_iter_data.payload = payload;
	rtw_iterate_vifs(rtwdev, rtw_fw_bcn_filter_notify_vif_iter,
			 &dev_iter_data);
}
static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
			       u8 length)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;

	dm_info->scan_density = payload[0];

	rtw_dbg(rtwdev, RTW_DBG_FW, "scan.density = %x\n",
		dm_info->scan_density);
}

static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
				     u8 length)
{
	struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
	struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
		"Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
		result->density, result->igi, result->l2h_th_init, result->l2h,
		result->h2l, result->option);

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
		rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
				edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));

	rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
		rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
		"Set" : "Unset");
}
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u32 pkt_offset;
	u8 len;

	pkt_offset = *((u32 *)skb->cb);
	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		goto unlock;

	switch (c2h->id) {
	case C2H_CCX_TX_RPT:
		rtw_tx_report_handle(rtwdev, skb, C2H_CCX_TX_RPT);
		break;
	case C2H_BT_INFO:
		rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BT_HID_INFO:
		rtw_coex_bt_hid_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_WLAN_INFO:
		rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_BCN_FILTER_NOTIFY:
		rtw_fw_bcn_filter_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_HALMAC:
		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
		break;
	case C2H_RA_RPT:
		rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
		break;
	default:
		rtw_dbg(rtwdev, RTW_DBG_FW, "C2H 0x%x isn't handled\n", c2h->id);
		break;
	}

unlock:
	mutex_unlock(&rtwdev->mutex);
}
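
/* Called from the RX interrupt path: only C2H events that complete a
 * waiting caller (BT MP info, RF-on, scan density, adaptivity) are
 * consumed here. Everything else is queued to c2h_queue and deferred to
 * c2h_work, where rtw_fw_c2h_cmd_handle() can take rtwdev->mutex safely.
 */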
void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
			       struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 len;

	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;
	*((u32 *)skb->cb) = pkt_offset;

	rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
		c2h->id, c2h->seq, len);

	switch (c2h->id) {
	case C2H_BT_MP_INFO:
		rtw_coex_info_response(rtwdev, skb);
		break;
	case C2H_WLAN_RFON:
		complete(&rtwdev->lps_leave_check);
		dev_kfree_skb_any(skb);
		break;
	case C2H_SCAN_RESULT:
		complete(&rtwdev->fw_scan_density);
		rtw_fw_scan_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	case C2H_ADAPTIVITY:
		rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
		dev_kfree_skb_any(skb);
		break;
	default:
		/* pass offset for further operation */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&rtwdev->c2h_queue, skb);
		ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		break;
	}
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);

void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
{
	if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
		rtw_fw_recovery(rtwdev);
	else
		rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
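
/* H2C commands are 8 bytes wide, written to one of four mailboxes
 * (REG_HMEBOX0..3 plus the matching _EX registers for the upper 4 bytes).
 * The boxes are used round-robin via h2c.last_box_num; before writing,
 * REG_HMETFR is polled until the firmware has drained the target box,
 * i.e. that box's busy bit clears.
 */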
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
				    u8 *h2c)
{
	u8 box;
	u8 box_state;
	u32 box_reg, box_ex_reg;
	int idx;
	int ret;

	rtw_dbg(rtwdev, RTW_DBG_FW,
		"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
		h2c[3], h2c[2], h2c[1], h2c[0],
		h2c[7], h2c[6], h2c[5], h2c[4]);

	spin_lock(&rtwdev->h2c.lock);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		goto out;
	}

	ret = read_poll_timeout_atomic(rtw_read8, box_state,
				       !((box_state >> box) & 0x1), 100, 3000,
				       false, rtwdev, REG_HMETFR);
	if (ret) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		goto out;
	}

	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;

out:
	spin_unlock(&rtwdev->h2c.lock);
}
void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
{
	rtw_fw_send_h2c_command(rtwdev, h2c);
}

static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
	int ret;

	spin_lock(&rtwdev->h2c.lock);

	FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
	ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
	if (ret)
		rtw_err(rtwdev, "failed to send h2c packet\n");
	rtwdev->h2c.seq++;

	spin_unlock(&rtwdev->h2c.lock);
}
void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 4;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
					fifo->rsvd_fw_txbuf_addr -
					fifo->rsvd_boundary);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 8;
	u8 fw_rf_type = 0;

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	if (hal->rf_type == RF_1T1R)
		fw_rf_type = FW_RF_1T1R;
	else if (hal->rf_type == RF_2T2R)
		fw_rf_type = FW_RF_2T2R;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
	PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
	PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
	PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
	PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 1;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	IQK_SET_CLEAR(h2c_pkt, para->clear);
	IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_do_iqk);

void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION);

	RFK_SET_INFORM_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
EXPORT_SYMBOL(rtw_fw_inform_rfk_status);

void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);

	SET_QUERY_BT_INFO(h2c_pkt, true);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);

	SET_WL_CH_INFO_LINK(h2c_pkt, link);
	SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
	SET_WL_CH_INFO_BW(h2c_pkt, bw);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
			     struct rtw_coex_info_req *req)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);

	SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
	SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
	SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
	SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
	SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 index = 0 - bt_pwr_dec_lvl;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

	SET_BT_TX_POWER_INDEX(h2c_pkt, index);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);

	SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
			   u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);

	SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
	SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
	SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
	SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
	SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_query_hid_info(struct rtw_dev *rtwdev, u8 sub_id, u8 data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_HID_INFO);

	SET_COEX_QUERY_HID_INFO_SUBID(h2c_pkt, sub_id);
	SET_COEX_QUERY_HID_INFO_DATA1(h2c_pkt, data);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);

	SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);

	SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
	SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
	SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
	SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
	SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 rssi = ewma_rssi_read(&si->avg_rssi);
	bool stbc_en = si->stbc_en ? true : false;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);

	SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
	SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
			 bool reset_ra_mask)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	bool disable_pt = true;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);

	SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
	SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
	SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
	SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
	SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
	SET_RA_INFO_NO_UPDATE(h2c_pkt, !reset_ra_mask);
	SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
	SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
	SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
	SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
	SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
	SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);

	si->init_ra_lv = 0;

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
	MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
	MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_update_wl_phy_info(struct rtw_dev *rtwdev)
{
	struct rtw_traffic_stats *stats = &rtwdev->stats;
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_PHY_INFO);
	SET_WL_PHY_INFO_TX_TP(h2c_pkt, stats->tx_throughput);
	SET_WL_PHY_INFO_RX_TP(h2c_pkt, stats->rx_throughput);
	SET_WL_PHY_INFO_TX_RATE_DESC(h2c_pkt, dm_info->tx_rate);
	SET_WL_PHY_INFO_RX_RATE_DESC(h2c_pkt, dm_info->curr_rx_rate);
	SET_WL_PHY_INFO_RX_EVM(h2c_pkt, dm_info->rx_evm_dbm[RF_PATH_A]);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
				 struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
	static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
	struct rtw_sta_info *si =
		sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
	s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
		return;

	if (!connect) {
		SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
		SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
		rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

		return;
	}

	if (!si)
		return;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P0);
	ether_addr_copy(&h2c_pkt[1], bss_conf->bssid);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);

	memset(h2c_pkt, 0, sizeof(h2c_pkt));
	threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
	SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
	SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
					       BCN_FILTER_OFFLOAD_MODE_DEFAULT);
	SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
	SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
	SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
	SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

	SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
	SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
	SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
	SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
	SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
	SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
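
/* WoWLAN keep-alive: with .adopt set, the firmware is expected to send
 * NULL data frames on the host's behalf at the check period configured
 * below, keeping the association alive while the host is suspended.
 */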
void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_keep_alive_para mode = {
		.adopt = true,
		.pkt_type = KEEP_ALIVE_NULL_PKT,
		.period = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
	SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
	SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
	SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
	SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_disconnect_para mode = {
		.adopt = true,
		.period = 30,
		.retry_count = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

	if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
		SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
		SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
		SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

	SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
	if (rtw_wow_mgd_linked(rtwdev)) {
		if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
			SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
			SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
			SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
		if (rtw_wow->pattern_cnt)
			SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
				     u8 pairwise_key_enc,
				     u8 group_key_enc)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);

	SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
	SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);

	SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);

	if (rtw_wow_no_link(rtwdev))
		SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
				     enum rtw_rsvd_packet_type type)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (type == rsvd_pkt->type)
			location = rsvd_pkt->page;
	}

	return location;
}

void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_nlo;

	loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);
	SET_NLO_FUN_EN(h2c_pkt, enable);
	if (enable) {
		if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
			SET_NLO_PS_32K(h2c_pkt, enable);
		SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
		SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_pg, loc_dpk;

	loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
	loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

	LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
	LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
	LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
	LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
					       struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			location = rsvd_pkt->page;
	}

	return location;
}

static u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
					    struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u16 size = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			size = rsvd_pkt->probe_req_size;
	}

	return size;
}
void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 location = 0;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
	*(h2c_pkt + 1) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
	*(h2c_pkt + 2) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
	*(h2c_pkt + 3) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
	*(h2c_pkt + 4) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
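
/* Build the NLO (network list offload) info block for the reserved page:
 * a rtw_nlo_info_hdr carrying the match-set count, a 0xA5 fill pattern the
 * firmware uses as a sanity check, per-SSID lengths and the page locations
 * of the matching probe requests, followed by each SSID padded out to
 * IEEE80211_MAX_SSID_LEN.
 */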
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct rtw_nlo_info_hdr *nlo_hdr;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	u8 *pos, loc;
	u32 size;
	int i;

	if (!pno_req->inited || !pno_req->match_set_cnt)
		return NULL;

	size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
		      IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

	nlo_hdr->nlo_count = pno_req->match_set_cnt;
	nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

	/* pattern check for firmware */
	memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

	for (i = 0; i < pno_req->match_set_cnt; i++)
		nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		ssid = &pno_req->match_sets[i].ssid;
		loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
		if (!loc) {
			rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
			kfree_skb(skb);
			return NULL;
		}
		nlo_hdr->location[i] = loc;
	}

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
		memcpy(pos, pno_req->match_sets[i].ssid.ssid,
		       pno_req->match_sets[i].ssid.ssid_len);
	}

	return skb;
}
static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct ieee80211_channel *channels = pno_req->channels;
	struct sk_buff *skb;
	int count = pno_req->channel_cnt;
	u8 *pos;
	int i = 0;

	skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	for (i = 0; i < count; i++) {
		pos = skb_put_zero(skb, 4);

		CHSW_INFO_SET_CH(pos, channels[i].hw_value);

		if (channels[i].flags & IEEE80211_CHAN_RADAR)
			CHSW_INFO_SET_ACTION_ID(pos, 0);
		else
			CHSW_INFO_SET_ACTION_ID(pos, 1);
		CHSW_INFO_SET_TIMEOUT(pos, 1);
		CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
		CHSW_INFO_SET_BW(pos, 0);
	}

	return skb;
}
static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
	struct rtw_lps_pg_dpk_hdr *dpk_hdr;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
	dpk_hdr->dpk_ch = dpk_info->dpk_ch;
	dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
	memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
	memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
	memcpy(dpk_hdr->coef, dpk_info->coef, 160);

	return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	struct rtw_lps_pg_info_hdr *pg_info_hdr;
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
	pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
	pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
	pg_info_hdr->sec_cam_count =
		rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
	pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

	conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
	conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

	return skb;
}
static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
					     struct rtw_rsvd_page *rsvd_pkt)
{
	struct ieee80211_vif *vif;
	struct rtw_vif *rtwvif;
	struct sk_buff *skb_new;
	struct cfg80211_ssid *ssid;
	u16 tim_offset = 0;

	if (rsvd_pkt->type == RSVD_DUMMY) {
		skb_new = alloc_skb(1, GFP_KERNEL);
		if (!skb_new)
			return NULL;

		skb_put(skb_new, 1);
		return skb_new;
	}

	rtwvif = rsvd_pkt->rtwvif;
	if (!rtwvif)
		return NULL;

	vif = rtwvif_to_vif(rtwvif);

	switch (rsvd_pkt->type) {
	case RSVD_BEACON:
		skb_new = ieee80211_beacon_get_tim(hw, vif, &tim_offset, NULL, 0);
		rsvd_pkt->tim_offset = tim_offset;
		break;
	case RSVD_PS_POLL:
		skb_new = ieee80211_pspoll_get(hw, vif);
		break;
	case RSVD_PROBE_RESP:
		skb_new = ieee80211_proberesp_get(hw, vif);
		break;
	case RSVD_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, -1, false);
		break;
	case RSVD_QOS_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, -1, true);
		break;
	case RSVD_LPS_PG_DPK:
		skb_new = rtw_lps_pg_dpk_get(hw);
		break;
	case RSVD_LPS_PG_INFO:
		skb_new = rtw_lps_pg_info_get(hw);
		break;
	case RSVD_PROBE_REQ:
		ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
		if (ssid)
			skb_new = ieee80211_probereq_get(hw, vif->addr,
							 ssid->ssid,
							 ssid->ssid_len, 0);
		else
			skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
		if (skb_new)
			rsvd_pkt->probe_req_size = (u16)skb_new->len;
		break;
	case RSVD_NLO_INFO:
		skb_new = rtw_nlo_info_get(hw);
		break;
	case RSVD_CH_INFO:
		skb_new = rtw_cs_channel_info_get(hw);
		break;
	default:
		return NULL;
	}

	if (!skb_new)
		return NULL;

	return skb_new;
}
static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				    enum rtw_rsvd_packet_type type)
{
	struct rtw_tx_pkt_info pkt_info = {0};
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 *pkt_desc;

	rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(&pkt_info, skb);
}

static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
	return DIV_ROUND_UP(len, page_size);
}
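
/* Copy one reserved packet into the flat rsvd-page buffer. The first page
 * holds the beacon without a tx_desc (the TX path prepends one when the
 * buffer is downloaded), so every later packet starts at an offset of
 * page_margin = page_size - tx_pkt_desc_sz from its page boundary.
 */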
static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
				      u8 page_margin, u32 page, u8 *buf,
				      struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb = rsvd_pkt->skb;

	if (page >= 1)
		memcpy(buf + page_margin + page_size * (page - 1),
		       skb->data, skb->len);
	else
		memcpy(buf, skb->data, skb->len);
}

static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
						 enum rtw_rsvd_packet_type type,
						 bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt = NULL;

	rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
	if (!rsvd_pkt)
		return NULL;

	INIT_LIST_HEAD(&rsvd_pkt->vif_list);
	INIT_LIST_HEAD(&rsvd_pkt->build_list);
	rsvd_pkt->type = type;
	rsvd_pkt->add_txdesc = txdesc;

	return rsvd_pkt;
}
static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
				 struct rtw_vif *rtwvif,
				 struct rtw_rsvd_page *rsvd_pkt)
{
	lockdep_assert_held(&rtwdev->mutex);

	list_add_tail(&rsvd_pkt->vif_list, &rtwvif->rsvd_page_list);
}

static void rtw_add_rsvd_page(struct rtw_dev *rtwdev,
			      struct rtw_vif *rtwvif,
			      enum rtw_rsvd_packet_type type,
			      bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc rsvd page %d\n", type);
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

static void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif,
					struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
	if (!rsvd_pkt) {
		rtw_err(rtwdev, "failed to alloc probe req rsvd page\n");
		return;
	}

	rsvd_pkt->rtwvif = rtwvif;
	rsvd_pkt->ssid = ssid;
	rtw_insert_rsvd_page(rtwdev, rtwvif, rsvd_pkt);
}

void rtw_remove_rsvd_page(struct rtw_dev *rtwdev,
			  struct rtw_vif *rtwvif)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	lockdep_assert_held(&rtwdev->mutex);

	/* remove all of the rsvd pages for vif */
	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwvif->rsvd_page_list,
				 vif_list) {
		list_del(&rsvd_pkt->vif_list);
		if (!list_empty(&rsvd_pkt->build_list))
			list_del(&rsvd_pkt->build_list);
		kfree(rsvd_pkt);
	}
}
void rtw_add_rsvd_page_bcn(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    vif->type != NL80211_IFTYPE_MESH_POINT) {
		rtw_warn(rtwdev, "Cannot add beacon rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_BEACON, false);
}

void rtw_add_rsvd_page_pno(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw_pno_request *rtw_pno_req = &rtw_wow->pno_req;
	struct cfg80211_ssid *ssid;
	int i;

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add PNO rsvd page for %d\n",
			 vif->type);
		return;
	}

	for (i = 0; i < rtw_pno_req->match_set_cnt; i++) {
		ssid = &rtw_pno_req->match_sets[i].ssid;
		rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, ssid);
	}

	rtw_add_rsvd_page_probe_req(rtwdev, rtwvif, NULL);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NLO_INFO, false);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_CH_INFO, true);
}

void rtw_add_rsvd_page_sta(struct rtw_dev *rtwdev,
			   struct rtw_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);

	if (vif->type != NL80211_IFTYPE_STATION) {
		rtw_warn(rtwdev, "Cannot add sta rsvd page for %d\n",
			 vif->type);
		return;
	}

	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_PS_POLL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_QOS_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_NULL, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_DPK, true);
	rtw_add_rsvd_page(rtwdev, rtwvif, RSVD_LPS_PG_INFO, true);
}
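
/* Download a buffer into the reserved page area of the TX FIFO. The
 * beacon-queue controls are temporarily flipped (BIT_ENSWBCN on, beacon
 * queue download off) so the write lands in the reserved pages rather
 * than going out over the air; the saved REG_CR/REG_FWHW_TXQ_CTRL bytes
 * are restored on exit, and the BCN_VALID bit is polled to confirm the
 * hardware accepted the data.
 */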
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				u8 *buf, u32 size)
{
	u8 bckp[2];
	u8 val;
	u16 rsvd_pg_head;
	u32 bcn_valid_addr;
	u32 bcn_valid_mask;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	if (!size)
		return -EINVAL;

	if (rtw_chip_wcpu_11n(rtwdev)) {
		rtw_write32_set(rtwdev, REG_DWBCN0_CTRL, BIT_BCN_VALID);
	} else {
		pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
		pg_addr |= BIT_BCN_VALID_V1;
		rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr);
	}

	val = rtw_read8(rtwdev, REG_CR + 1);
	bckp[0] = val;
	val |= BIT_ENSWBCN >> 8;
	rtw_write8(rtwdev, REG_CR + 1, val);

	val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
	bckp[1] = val;
	val &= ~(BIT_EN_BCNQ_DL >> 16);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);

	ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to write data to rsvd page\n");
		goto restore;
	}

	if (rtw_chip_wcpu_11n(rtwdev)) {
		bcn_valid_addr = REG_DWBCN0_CTRL;
		bcn_valid_mask = BIT_BCN_VALID;
	} else {
		bcn_valid_addr = REG_FIFOPAGE_CTRL_2;
		bcn_valid_mask = BIT_BCN_VALID_V1;
	}

	if (!check_hw_ready(rtwdev, bcn_valid_addr, bcn_valid_mask, 1)) {
		rtw_err(rtwdev, "error beacon valid\n");
		ret = -EBUSY;
	}

restore:
	rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
		    rsvd_pg_head | BIT_BCN_VALID_V1);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
	rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

	return ret;
}
static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	u32 pg_size;
	u32 pg_num = 0;
	u16 pg_addr = 0;

	pg_size = rtwdev->chip->page_size;
	pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
	if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
		return -ENOMEM;

	pg_addr = rtwdev->fifo.rsvd_drv_addr;

	return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}

static void __rtw_build_rsvd_page_reset(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list,
				 build_list) {
		list_del_init(&rsvd_pkt->build_list);

		/* Don't free except for the dummy rsvd page,
		 * others will be freed when removing vif
		 */
		if (rsvd_pkt->type == RSVD_DUMMY)
			kfree(rsvd_pkt);
	}
}
static void rtw_build_rsvd_page_iter(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct rtw_dev *rtwdev = data;
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct rtw_rsvd_page *rsvd_pkt;

	list_for_each_entry(rsvd_pkt, &rtwvif->rsvd_page_list, vif_list) {
		if (rsvd_pkt->type == RSVD_BEACON)
			list_add(&rsvd_pkt->build_list,
				 &rtwdev->rsvd_page_list);
		else
			list_add_tail(&rsvd_pkt->build_list,
				      &rtwdev->rsvd_page_list);
	}
}

static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt;

	__rtw_build_rsvd_page_reset(rtwdev);

	/* gather rsvd page from vifs */
	rtw_iterate_vifs_atomic(rtwdev, rtw_build_rsvd_page_iter, rtwdev);

	rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
					    struct rtw_rsvd_page, build_list);
	if (!rsvd_pkt) {
		WARN(1, "Should not have an empty reserved page\n");
		return -EINVAL;
	}

	/* the first rsvd should be beacon, otherwise add a dummy one */
	if (rsvd_pkt->type != RSVD_BEACON) {
		struct rtw_rsvd_page *dummy_pkt;

		dummy_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_DUMMY, false);
		if (!dummy_pkt) {
			rtw_err(rtwdev, "failed to alloc dummy rsvd page\n");
			return -ENOMEM;
		}

		list_add(&dummy_pkt->build_list, &rtwdev->rsvd_page_list);
	}

	return 0;
}
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *iter;
	struct rtw_rsvd_page *rsvd_pkt;
	u32 page = 0;
	u8 total_page = 0;
	u8 page_size, page_margin, tx_desc_sz;
	u8 *buf;
	int ret;

	page_size = chip->page_size;
	tx_desc_sz = chip->tx_pkt_desc_sz;
	page_margin = page_size - tx_desc_sz;

	ret = __rtw_build_rsvd_page_from_vifs(rtwdev);
	if (ret) {
		rtw_err(rtwdev,
			"failed to build rsvd page from vifs, ret %d\n", ret);
		return NULL;
	}

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		iter = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
		if (!iter) {
			rtw_err(rtwdev, "failed to build rsvd packet\n");
			goto release_skb;
		}

		/* Fill the tx_desc for the rsvd pkt that requires one,
		 * which also grows iter->len by tx_desc_sz.
		 */
		if (rsvd_pkt->add_txdesc)
			rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);

		rsvd_pkt->skb = iter;
		rsvd_pkt->page = total_page;

		/* The reserved page is downloaded via the TX path, which
		 * generates a tx_desc at the head to describe the length of
		 * the buffer. If we did not count the tx_desc size into the
		 * page count of the first rsvd_pkt (usually a beacon; the
		 * firmware by default refers to the first page as the beacon
		 * content), we could end up with a buffer smaller than the
		 * actual size of the whole rsvd_page.
		 */
		if (total_page == 0) {
			if (rsvd_pkt->type != RSVD_BEACON &&
			    rsvd_pkt->type != RSVD_DUMMY) {
				rtw_err(rtwdev, "first page should be a beacon\n");
				goto release_skb;
			}
			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
						      page_size);
		} else {
			total_page += rtw_len_to_page(iter->len, page_size);
		}
	}

	if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
		rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
		goto release_skb;
	}

	*size = (total_page - 1) * page_size + page_margin;
	buf = kzalloc(*size, GFP_KERNEL);
	if (!buf)
		goto release_skb;

	/* Copy the content of each rsvd_pkt to the buf, aligned to the
	 * pages.
	 *
	 * Note that the first rsvd_pkt is a beacon no matter what vif->type.
	 * That rsvd_pkt does not require a tx_desc, because the TX path
	 * generates one for it on download.
	 */
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
					  page, buf, rsvd_pkt);
		if (page == 0)
			page += rtw_len_to_page(rsvd_pkt->skb->len +
						tx_desc_sz, page_size);
		else
			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return buf;

release_skb:
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, build_list) {
		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return NULL;
}
  1240. static int rtw_download_beacon(struct rtw_dev *rtwdev)
  1241. {
  1242. struct ieee80211_hw *hw = rtwdev->hw;
  1243. struct rtw_rsvd_page *rsvd_pkt;
  1244. struct sk_buff *skb;
  1245. int ret = 0;
  1246. rsvd_pkt = list_first_entry_or_null(&rtwdev->rsvd_page_list,
  1247. struct rtw_rsvd_page, build_list);
  1248. if (!rsvd_pkt) {
  1249. rtw_err(rtwdev, "failed to get rsvd page from build list\n");
  1250. return -ENOENT;
  1251. }
  1252. if (rsvd_pkt->type != RSVD_BEACON &&
  1253. rsvd_pkt->type != RSVD_DUMMY) {
  1254. rtw_err(rtwdev, "invalid rsvd page type %d, should be beacon or dummy\n",
  1255. rsvd_pkt->type);
  1256. return -EINVAL;
  1257. }
  1258. skb = rtw_get_rsvd_page_skb(hw, rsvd_pkt);
  1259. if (!skb) {
  1260. rtw_err(rtwdev, "failed to get beacon skb\n");
  1261. return -ENOMEM;
  1262. }
  1263. ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
  1264. if (ret)
  1265. rtw_err(rtwdev, "failed to download drv rsvd page\n");
  1266. dev_kfree_skb(skb);
  1267. return ret;
  1268. }
int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev)
{
	u8 *buf;
	u32 size;
	int ret;

	buf = rtw_build_rsvd_page(rtwdev, &size);
	if (!buf) {
		rtw_err(rtwdev, "failed to build rsvd page pkt\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to download drv rsvd page\n");
		goto free;
	}

	/* The last thing is to download the *ONLY* beacon again, because
	 * the previous tx_desc describes the total rsvd page. Download the
	 * beacon again to replace the TX desc header, so we get a correct
	 * tx_desc for the beacon in the rsvd page.
	 */
	ret = rtw_download_beacon(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to download beacon\n");
		goto free;
	}

free:
	kfree(buf);

	return ret;
}

void rtw_fw_update_beacon_work(struct work_struct *work)
{
	struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
					      update_beacon_work);

	mutex_lock(&rtwdev->mutex);
	rtw_fw_download_rsvd_page(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}

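/* Read @size bytes from the MAC packet buffer through the debug port:
 * selecting a page in REG_PKTBUF_DBG_CTRL maps it into the window at
 * FIFO_DUMP_ADDR, from which the content is read out one u32 at a time.
 * @residue is the byte offset into the first page.
 */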
static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
				  u32 *buf, u32 residue, u16 start_pg)
{
	u32 i;
	u16 idx = 0;
	u16 ctl;

	ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
	/* disable rx clock gate */
	rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);

	do {
		rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

		for (i = FIFO_DUMP_ADDR + residue;
		     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
			buf[idx++] = rtw_read32(rtwdev, i);
			size -= 4;
			if (size == 0)
				goto out;
		}

		residue = 0;
		start_pg++;
	} while (size);

out:
	rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
	/* restore rx clock gate */
	rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}

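/* Translate a byte offset within the selected FIFO into a debug-port page
 * and an in-page residue. As a worked example (assuming 4 KiB debug pages,
 * i.e. FIFO_PAGE_SIZE_SHIFT == 12), offset 0x1234 gives
 * residue = 0x1234 & 0xfff = 0x234 and
 * start_pg = (0x1234 >> 12) + chip->fw_fifo_addr[sel], i.e. the FIFO's base
 * page plus one.
 */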
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
			     u32 offset, u32 size, u32 *buf)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 start_pg, residue;

	if (sel >= RTW_FW_FIFO_MAX) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
		return;
	}
	if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
		offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
	residue = offset & (FIFO_PAGE_SIZE - 1);
	start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];

	rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
}

static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
				   enum rtw_fw_fifo_sel sel,
				   u32 start_addr, u32 size)
{
	switch (sel) {
	case RTW_FW_FIFO_SEL_TX:
	case RTW_FW_FIFO_SEL_RX:
		if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
			return false;
		fallthrough;
	default:
		return true;
	}
}

int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
		     u32 *buffer)
{
	if (!rtwdev->chip->fw_fifo_addr[0]) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
		return -ENOTSUPP;
	}

	if (size == 0 || !buffer)
		return -EINVAL;

	if (size & 0x3) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n");
		return -EINVAL;
	}

	if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
		return -EINVAL;
	}

	rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);

	return 0;
}

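/* Illustrative caller of rtw_fw_dump_fifo() (a hypothetical debugfs hook,
 * not part of this file):
 *
 *	u32 buf[64];
 *
 *	if (!rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RX, 0, sizeof(buf), buf))
 *		print_hex_dump(KERN_DEBUG, "rx fifo: ", DUMP_PREFIX_OFFSET,
 *			       16, 4, buf, sizeof(buf), false);
 */
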
static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	size += chip->tx_pkt_desc_sz;
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

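/* Tell firmware where the probe request for @ssid lives in the rsvd page
 * area and how large it is; both lookups return 0 if the packet was never
 * reserved.
 */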
void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	u8 loc;
	u16 size;

	loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
	if (!loc) {
		rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
		return;
	}

	size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
	if (!size) {
		rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
		return;
	}

	__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
}

void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
{
	struct rtw_dm_info *dm_info = &rtwdev->dm_info;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	if (!rtw_edcca_enabled) {
		dm_info->edcca_mode = RTW_EDCCA_NORMAL;
		rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
			"EDCCA disabled by debugfs\n");
	}

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
	SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
	SET_ADAPTIVITY_OPTION(h2c_pkt, 1);
	SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
	SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
	SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SCAN);
	SET_SCAN_START(h2c_pkt, start);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
				   struct sk_buff_head *list, u8 *bands,
				   struct rtw_vif *rtwvif)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct sk_buff *new;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(BIT(idx) & chip->band))
			continue;
		new = skb_copy(skb, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		skb_put_data(new, ies->ies[idx], ies->len[idx]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);
		skb_queue_tail(list, new);
		(*bands)++;
	}

	return 0;
}

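/* Lay the probe requests out in the rsvd page area. Page 0 is kept for the
 * tx_desc that the TX path prepends to the whole download, so the first
 * request is copied starting at (page_size - tx_desc_sz): its own tx_desc
 * fills the tail of page 0 and the frame body starts page-aligned at page 1.
 * Every request then occupies a fixed number of pages (page_cnt).
 */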
static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
					 struct sk_buff_head *probe_req_list)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb, *tmp;
	u8 page_offset = 1, *buf, page_size = chip->page_size;
	u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
	u16 buf_offset = page_size * page_offset;
	u8 tx_desc_sz = chip->tx_pkt_desc_sz;
	u8 page_cnt, pages;
	unsigned int pkt_len;
	int ret;

	if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM))
		page_cnt = RTW_OLD_PROBE_PG_CNT;
	else
		page_cnt = RTW_PROBE_PG_CNT;

	pages = page_offset + num_probes * page_cnt;

	buf = kzalloc(page_size * pages, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf_offset -= tx_desc_sz;
	skb_queue_walk_safe(probe_req_list, skb, tmp) {
		skb_unlink(skb, probe_req_list);
		rtw_fill_rsvd_page_desc(rtwdev, skb, RSVD_PROBE_REQ);
		if (skb->len > page_size * page_cnt) {
			ret = -EINVAL;
			goto out;
		}

		memcpy(buf + buf_offset, skb->data, skb->len);
		pkt_len = skb->len - tx_desc_sz;
		loc = pg_addr - rtwdev->fifo.rsvd_boundary + page_offset;
		__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, pkt_len, loc);

		buf_offset += page_cnt * page_size;
		page_offset += page_cnt;
		kfree_skb(skb);
	}

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, buf_offset);
	if (ret) {
		rtw_err(rtwdev, "Download probe request to firmware failed\n");
		goto out;
	}

	rtwdev->scan_info.probe_pg_size = page_offset;
out:
	kfree(buf);
	skb_queue_walk_safe(probe_req_list, skb, tmp)
		kfree_skb(skb);

	return ret;
}

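/* Build one probe request per requested SSID, let rtw_append_probe_req_ie()
 * fan each out into per-band copies with the band-specific and common IEs
 * attached, then hand the whole list to _rtw_hw_scan_update_probe_req() for
 * download.
 */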
static int rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev,
					struct rtw_vif *rtwvif)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff_head list;
	struct sk_buff *skb, *tmp;
	u8 num = req->n_ssids, i, bands = 0;
	int ret;

	skb_queue_head_init(&list);
	for (i = 0; i < num; i++) {
		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
					      rtwvif);
		if (ret)
			goto out;

		kfree_skb(skb);
	}

	return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);

out:
	skb_queue_walk_safe(&list, skb, tmp)
		kfree_skb(skb);

	return ret;
}

static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
			     struct rtw_chan_list *list, u8 *buf)
{
	u8 *chan = &buf[list->size];
	u8 info_size = RTW_CH_INFO_SIZE;

	if (list->size > list->buf_size)
		return -ENOMEM;

	CH_INFO_SET_CH(chan, info->channel);
	CH_INFO_SET_PRI_CH_IDX(chan, info->pri_ch_idx);
	CH_INFO_SET_BW(chan, info->bw);
	CH_INFO_SET_TIMEOUT(chan, info->timeout);
	CH_INFO_SET_ACTION_ID(chan, info->action_id);
	CH_INFO_SET_EXTRA_INFO(chan, info->extra_info);
	if (info->extra_info) {
		EXTRA_CH_INFO_SET_ID(chan, RTW_SCAN_EXTRA_ID_DFS);
		EXTRA_CH_INFO_SET_INFO(chan, RTW_SCAN_EXTRA_ACTION_SCAN);
		EXTRA_CH_INFO_SET_SIZE(chan, RTW_EX_CH_INFO_SIZE -
				       RTW_EX_CH_INFO_HDR_SIZE);
		EXTRA_CH_INFO_SET_DFS_EXT_TIME(chan, RTW_DFS_CHAN_TIME);
		info_size += RTW_EX_CH_INFO_SIZE;
	}
	list->size += info_size;
	list->ch_num++;

	return 0;
}

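/* Serialize every channel of the scan request into the list buffer. Each
 * entry takes RTW_CH_INFO_SIZE bytes, plus RTW_EX_CH_INFO_SIZE on radar /
 * no-IR channels that carry the extra DFS info; the finished list is
 * downloaded right behind the probe requests in the rsvd page area.
 */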
static int rtw_add_chan_list(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
			     struct rtw_chan_list *list, u8 *buf)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	struct ieee80211_channel *channel;
	int i, ret = 0;

	for (i = 0; i < req->n_channels; i++) {
		struct rtw_chan_info ch_info = {0};

		channel = req->channels[i];
		ch_info.channel = channel->hw_value;
		ch_info.bw = RTW_SCAN_WIDTH;
		ch_info.pri_ch_idx = RTW_PRI_CH_IDX;
		ch_info.timeout = req->duration_mandatory ?
				  req->duration : RTW_CHANNEL_TIME;

		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) {
			ch_info.action_id = RTW_CHANNEL_RADAR;
			ch_info.extra_info = 1;
			/* Overwrite duration for passive scans if necessary */
			ch_info.timeout = ch_info.timeout > RTW_PASS_CHAN_TIME ?
					  ch_info.timeout : RTW_PASS_CHAN_TIME;
		} else {
			ch_info.action_id = RTW_CHANNEL_ACTIVE;
		}

		ret = rtw_add_chan_info(rtwdev, &ch_info, list, buf);
		if (ret)
			return ret;
	}

	if (list->size > fifo->rsvd_pg_num << TX_PAGE_SIZE_SHIFT) {
		rtw_err(rtwdev, "List exceeds rsvd page total size\n");
		return -EINVAL;
	}

	list->addr = fifo->rsvd_h2c_info_addr + rtwdev->scan_info.probe_pg_size;
	ret = rtw_fw_write_data_rsvd_page(rtwdev, list->addr, buf, list->size);
	if (ret)
		rtw_err(rtwdev, "Download channel list failed\n");

	return ret;
}

static void rtw_fw_set_scan_offload(struct rtw_dev *rtwdev,
				    struct rtw_ch_switch_option *opt,
				    struct rtw_vif *rtwvif,
				    struct rtw_chan_list *list)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	/* reserve one dummy page at the beginning for tx descriptor */
	u8 pkt_loc = fifo->rsvd_h2c_info_addr - fifo->rsvd_boundary + 1;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_SCAN_OFFLOAD);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, H2C_PKT_CH_SWITCH_LEN);

	SCAN_OFFLOAD_SET_START(h2c_pkt, opt->switch_en);
	SCAN_OFFLOAD_SET_BACK_OP_EN(h2c_pkt, opt->back_op_en);
	SCAN_OFFLOAD_SET_RANDOM_SEQ_EN(h2c_pkt, random_seq);
	SCAN_OFFLOAD_SET_NO_CCK_EN(h2c_pkt, req->no_cck);
	SCAN_OFFLOAD_SET_CH_NUM(h2c_pkt, list->ch_num);
	SCAN_OFFLOAD_SET_CH_INFO_SIZE(h2c_pkt, list->size);
	SCAN_OFFLOAD_SET_CH_INFO_LOC(h2c_pkt, list->addr - fifo->rsvd_boundary);
	SCAN_OFFLOAD_SET_OP_CH(h2c_pkt, scan_info->op_chan);
	SCAN_OFFLOAD_SET_OP_PRI_CH_IDX(h2c_pkt, scan_info->op_pri_ch_idx);
	SCAN_OFFLOAD_SET_OP_BW(h2c_pkt, scan_info->op_bw);
	SCAN_OFFLOAD_SET_OP_PORT_ID(h2c_pkt, rtwvif->port);
	SCAN_OFFLOAD_SET_OP_DWELL_TIME(h2c_pkt, req->duration_mandatory ?
				       req->duration : RTW_CHANNEL_TIME);
	SCAN_OFFLOAD_SET_OP_GAP_TIME(h2c_pkt, RTW_OFF_CHAN_TIME);
	SCAN_OFFLOAD_SET_SSID_NUM(h2c_pkt, req->n_ssids);
	SCAN_OFFLOAD_SET_PKT_LOC(h2c_pkt, pkt_loc);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_hw_scan_start(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
		       struct ieee80211_scan_request *scan_req)
{
	struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	u8 mac_addr[ETH_ALEN];

	rtwdev->scan_info.scanning_vif = vif;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;

	ieee80211_stop_queues(rtwdev->hw);
	rtw_leave_lps_deep(rtwdev);
	rtw_hci_flush_all_queues(rtwdev, false);
	rtw_mac_flush_all_queues(rtwdev, false);
	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, vif->addr);

	rtw_core_scan_start(rtwdev, rtwvif, mac_addr, true);

	rtwdev->hal.rcr &= ~BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
}

void rtw_hw_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			  bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_vif *rtwvif;
	u8 chan = scan_info->op_chan;

	if (!vif)
		return;

	rtwdev->hal.rcr |= BIT_CBSSID_BCN;
	rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);

	rtw_core_scan_complete(rtwdev, vif, true);

	rtwvif = (struct rtw_vif *)vif->drv_priv;
	if (chan)
		rtw_store_op_chan(rtwdev, false);
	rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
	ieee80211_wake_queues(rtwdev->hw);
	ieee80211_scan_completed(rtwdev->hw, &info);

	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	rtwdev->scan_info.scanning_vif = NULL;
}

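/* Prepare everything the offloaded scan needs in the rsvd page area: the
 * channel-list buffer is sized for the worst case where every channel
 * carries the extended DFS info, then the probe requests and the channel
 * list are downloaded.
 */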
static int rtw_hw_scan_prehandle(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
				 struct rtw_chan_list *list)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	int size = req->n_channels * (RTW_CH_INFO_SIZE + RTW_EX_CH_INFO_SIZE);
	u8 *buf;
	int ret;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_hw_scan_update_probe_req(rtwdev, rtwvif);
	if (ret) {
		rtw_err(rtwdev, "Update probe request failed\n");
		goto out;
	}

	list->buf_size = size;
	list->size = 0;
	list->ch_num = 0;
	ret = rtw_add_chan_list(rtwdev, rtwvif, list, buf);
out:
	kfree(buf);

	return ret;
}

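/* Enable or disable the firmware scan offload. On enable, the probe
 * requests and channel list are downloaded first, then the H2C points
 * firmware at them and carries the operating-channel parameters used to
 * return to the op channel between scan hops (back-op).
 */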
int rtw_hw_scan_offload(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
			bool enable)
{
	struct rtw_vif *rtwvif = vif ? (struct rtw_vif *)vif->drv_priv : NULL;
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_ch_switch_option cs_option = {0};
	struct rtw_chan_list chan_list = {0};
	int ret = 0;

	if (!rtwvif)
		return -EINVAL;

	cs_option.switch_en = enable;
	cs_option.back_op_en = scan_info->op_chan != 0;
	if (enable) {
		ret = rtw_hw_scan_prehandle(rtwdev, rtwvif, &chan_list);
		if (ret)
			goto out;
	}
	rtw_fw_set_scan_offload(rtwdev, &cs_option, rtwvif, &chan_list);
out:
	return ret;
}

void rtw_hw_scan_abort(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_SCAN_OFFLOAD))
		return;

	rtw_hw_scan_offload(rtwdev, vif, false);
	rtw_hw_scan_complete(rtwdev, vif, true);
}

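/* C2H handler for the firmware end-of-scan report. Typical flow:
 * rtw_hw_scan_start() -> rtw_hw_scan_offload(enable) -> firmware scans and
 * sends this C2H -> rtw_hw_scan_complete() finishes the scan towards
 * mac80211, marking it aborted unless the return code is
 * RTW_SCAN_REPORT_SUCCESS.
 */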
void rtw_hw_scan_status_report(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw_c2h_cmd *c2h;
	bool aborted;
	u8 rc;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	rc = GET_SCAN_REPORT_RETURN_CODE(c2h->payload);
	aborted = rc != RTW_SCAN_REPORT_SUCCESS;
	rtw_hw_scan_complete(rtwdev, vif, aborted);

	if (aborted)
		rtw_dbg(rtwdev, RTW_DBG_HW_SCAN, "HW scan aborted with code: %d\n", rc);
}

void rtw_store_op_chan(struct rtw_dev *rtwdev, bool backup)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw_hal *hal = &rtwdev->hal;
	u8 band;

	if (backup) {
		scan_info->op_chan = hal->current_channel;
		scan_info->op_bw = hal->current_band_width;
		scan_info->op_pri_ch_idx = hal->current_primary_channel_index;
		scan_info->op_pri_ch = hal->primary_channel;
	} else {
		band = scan_info->op_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, scan_info->op_chan,
				   scan_info->op_pri_ch,
				   band, scan_info->op_bw);
	}
}

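/* Usage note: rtw_store_op_chan(rtwdev, true) snapshots the current
 * operating channel before a scan is set up; rtw_store_op_chan(rtwdev,
 * false) restores it, e.g. once a post-switch notification reports that
 * hardware is back on the op channel (see rtw_hw_scan_chan_switch() below).
 */
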
void rtw_clear_op_chan(struct rtw_dev *rtwdev)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;

	scan_info->op_chan = 0;
	scan_info->op_bw = 0;
	scan_info->op_pri_ch_idx = 0;
	scan_info->op_pri_ch = 0;
}

static bool rtw_is_op_chan(struct rtw_dev *rtwdev, u8 channel)
{
	struct rtw_hw_scan_info *scan_info = &rtwdev->scan_info;

	return channel == scan_info->op_chan;
}

void rtw_hw_scan_chan_switch(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_c2h_cmd *c2h;
	enum rtw_scan_notify_id id;
	u8 chan, band, status;

	if (!test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
		return;

	c2h = get_c2h_from_skb(skb);
	chan = GET_CHAN_SWITCH_CENTRAL_CH(c2h->payload);
	id = GET_CHAN_SWITCH_ID(c2h->payload);
	status = GET_CHAN_SWITCH_STATUS(c2h->payload);

	if (id == RTW_SCAN_NOTIFY_ID_POSTSWITCH) {
		band = chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
		rtw_update_channel(rtwdev, chan, chan, band,
				   RTW_CHANNEL_WIDTH_20);
		if (rtw_is_op_chan(rtwdev, chan)) {
			rtw_store_op_chan(rtwdev, false);
			ieee80211_wake_queues(rtwdev->hw);
		}
	} else if (id == RTW_SCAN_NOTIFY_ID_PRESWITCH) {
		if (IS_CH_5G_BAND(chan)) {
			rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
		} else if (IS_CH_2G_BAND(chan)) {
			u8 chan_type;

			if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags))
				chan_type = COEX_SWITCH_TO_24G;
			else
				chan_type = COEX_SWITCH_TO_24G_NOFORSCAN;
			rtw_coex_switchband_notify(rtwdev, chan_type);
		}
		/* The channel reported with RTW_SCAN_NOTIFY_ID_PRESWITCH is
		 * the next channel that hardware will switch to. Stop the
		 * queues if that next channel is not the op channel while
		 * the current one is.
		 */
		if (!rtw_is_op_chan(rtwdev, chan) &&
		    rtw_is_op_chan(rtwdev, hal->current_channel))
			ieee80211_stop_queues(rtwdev->hw);
	}

	rtw_dbg(rtwdev, RTW_DBG_HW_SCAN,
		"Chan switch: %x, id: %x, status: %x\n", chan, id, status);
}