// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"

void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}
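
/* A power sequence is an array of struct rtw_pwr_seq_cmd terminated by an
 * entry whose cmd is RTW_PWR_CMD_END.  Entries that do not match the current
 * HCI interface mask or chip cut mask are skipped; the rest are dispatched
 * as register writes, polling commands or delays.
 */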
static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = BIT(2);
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = BIT(1);
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}
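
/* The current power state is inferred from the hardware: REG_CR reads back
 * as 0xea while the MAC is powered off, and on USB the suspend status bit
 * in REG_SYS_STATUS1 also indicates power-off.  Requesting the state we are
 * already in returns -EALREADY so the caller can recover (see
 * rtw_mac_power_on()).
 */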
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u8 rpwm;
	bool cur_pwr;
	int ret;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check FW still exist or not */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
	if (ret)
		return ret;

	if (pwr_on)
		set_bit(RTW_FLAG_POWERON, rtwdev->flags);
	else
		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);

	return 0;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);

		ret = rtw_mac_pre_system_cfg(rtwdev);
		if (ret)
			goto err;

		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}
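
/* Non-legacy (11ac wmac) firmware image layout, as assumed by
 * check_firmware_size() and start_download_firmware(): a header of
 * FW_HDR_SIZE bytes followed by the DMEM and IMEM segments and an optional
 * EMEM segment, each carrying an extra FW_HDR_CHKSUM_SIZE checksum.
 */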
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
					const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);

	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}
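
/* Download one firmware segment: each chunk (at most 0x1000 bytes) is first
 * written into the reserved-page area of the TX buffer, then copied by the
 * DDMA engine from OCPBASE_TXBUF_88XX into the target memory.  The hardware
 * checksum is accumulated across chunks (BIT_DDMACH0_CHKSUM_CONT) and
 * verified once the whole segment has been transferred.
 */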
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}
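
/* Full download flow for 11ac wmac parts: halt the WLAN CPU, back up the
 * registers touched during download, reset the platform, DDMA the DMEM/IMEM
 * (and optional EMEM) segments, restore the registers, restart the CPU and
 * finally wait for the firmware-ready indication.  The LTE coex register
 * 0x38 is preserved around the whole procedure.
 */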
static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
		return -EBUSY;

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");

fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}
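
/* Legacy (11n wmac) download: the firmware body is written 4 bytes at a
 * time starting at FW_START_ADDR_LEGACY, with the page-select field of
 * REG_MCUFW_CTRL switching between DLFW_PAGE_SIZE_LEGACY-sized pages.
 */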
static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}

static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}
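
/* A priority queue is considered flushed once its available page count
 * equals its reserved page count, i.e. no pages are held by pending TX.
 * Poll up to five times with 20 ms sleeps (~100 ms total) before warning.
 */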
static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				    rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				     rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a warning,
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);

	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	return 0;
}
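
/* Lay out the TX FIFO in 128-byte pages (txff_size >> 7): the reserved
 * pages (driver rsvd pages plus, on 11ac wmac, H2C/CSI/CPU/FW-txbuf blocks)
 * sit at the top of the FIFO, and rsvd_boundary marks where the AC queue
 * pages end and the reserved area begins.
 */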
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = 8;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				    RSVD_PG_H2C_EXTRAINFO_NUM +
				    RSVD_PG_H2C_STATICINFO_NUM +
				    RSVD_PG_H2CQ_NUM +
				    RSVD_PG_CPU_INSTRUCTION_NUM +
				    RSVD_PG_FW_TXBUF_NUM +
				    csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}
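
/* Program the H2C command queue ring inside the reserved page area: head,
 * read and tail pointers are byte addresses derived from the page address
 * (rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT), and the queue must report as
 * completely free after setup.
 */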
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}