/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(3, 2)
#define MT_QFLAG_WED		BIT(4)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
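
/*
 * Illustrative note (not part of the original header): these flags pack the
 * WED queue type and ring number into mt76_queue::flags. For example,
 * MT_WED_Q_TX(1) expands to
 *
 *	MT_QFLAG_WED |
 *	FIELD_PREP(MT_QFLAG_WED_TYPE, MT76_WED_Q_TX) |
 *	FIELD_PREP(MT_QFLAG_WED_RING, 1)
 *
 * so a queue can be tested for WED ownership via MT_QFLAG_WED and its ring
 * index recovered with FIELD_GET(MT_QFLAG_WED_RING, q->flags).
 */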

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_BAND1,
	MT_RXQ_BAND1_WA,
	MT_RXQ_MAIN_WA,
	MT_RXQ_BAND2,
	MT_RXQ_BAND2_WA,
	__MT_RXQ_MAX
};

enum mt76_band_id {
	MT_BAND0,
	MT_BAND1,
	MT_BAND2,
	__MT_MAX_BAND
};

enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	MT_DFS_STATE_CAC,
	MT_DFS_STATE_ACTIVE,
};
struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len;
	bool skip_unmap;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	u32 dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u8 hw_idx;
	u8 flags;

	u32 wed_regs;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};
struct mt76_mcu_ops {
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    enum mt76_txq_id qid, struct sk_buff *skb,
			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};
enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	__MT_PHY_TYPE_HE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
	u64 tx_bw[4];		/* 20, 40, 80, 160 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
	u64 tx_bytes;
	u32 tx_packets;
	u32 tx_retries;
	u32 tx_failed;
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};
#define MT76_N_WCIDS 544

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY		GENMASK(3, 2)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 sta:1;
	u8 amsdu:1;
	u8 phy_idx:2;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head list;
	struct idr pktid;

	struct mt76_sta_stats stats;
};

struct mt76_txq {
	u16 wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_WED		2
#define MT_PACKET_ID_FIRST		3
#define MT_PACKET_ID_HAS_RATE		BIT(7)

/* This is the timeout for giving up while waiting for the TXS callback,
 * with the starting time being the time at which the DMA_DONE callback
 * was seen (so we know the packet was processed by then; it should not
 * take long after that for the firmware to send the TXS callback if it
 * is going to do so).
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	void (*update_survey)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))

enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};
struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024

struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;

	struct work_struct stat_work;

	u8 *xmit_buf;
	u32 xmit_buf_sz;

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;

	struct mtk_wed_device wed;
};
struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 phy_idx:2;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3, he_ru:3;
	u8 he_gi:2, he_dcm:1;
	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u8 addr[3][ETH_ALEN];

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif {
	u8 idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
	u8 cipher;
};
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;
	u8 band_idx;

	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	ktime_t survey_time;

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;
};
struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	struct net_device tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
	u16 token_size;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect the wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u64 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	struct mt76_rate_power rate_power;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};
struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}
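
/*
 * Illustrative sketch (not part of the original header): these macros build
 * legacy bitrate tables. bitrate is in units of 100 kbit/s and hw_value
 * encodes the PHY type in the upper byte and the hardware rate index in the
 * lower byte. A 12-entry table in the shape of mt76_rates[] below might
 * look like:
 *
 *	static struct ieee80211_rate rates[] = {
 *		CCK_RATE(0, 10),
 *		CCK_RATE(1, 20),
 *		CCK_RATE(2, 55),
 *		CCK_RATE(3, 110),
 *		OFDM_RATE(11, 60),
 *		OFDM_RATE(15, 90),
 *		OFDM_RATE(10, 120),
 *		OFDM_RATE(14, 180),
 *		OFDM_RATE(9, 240),
 *		OFDM_RATE(13, 360),
 *		OFDM_RATE(8, 480),
 *		OFDM_RATE(12, 540),
 *	};
 */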

extern struct ieee80211_rate mt76_rates[12];

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw
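
/*
 * Illustrative sketch (not part of the original header): a typical
 * read-modify-write sequence in a chip driver, where "dev" is the
 * driver-private structure embedding struct mt76_dev as "mt76", and
 * MT_FOO / MT_FOO_EN are hypothetical register and field names:
 *
 *	u32 val;
 *
 *	val = mt76_rr(dev, MT_FOO);			// read register
 *	mt76_set(dev, MT_FOO, MT_FOO_EN);		// set bits
 *	mt76_clear(dev, MT_FOO, MT_FOO_EN);		// clear bits
 *	mt76_rmw_field(dev, MT_FOO, MT_FOO_EN, 1);	// update one bitfield
 *
 * The __mt76_* variants take a struct mt76_dev * directly instead.
 */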

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)		____mt76_poll_msec(__VA_ARGS__, 10)
#define mt76_poll_msec(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
#define mt76_poll_msec_tick(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)	(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)
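
/*
 * Illustrative sketch (not part of the original header): iterating the
 * active RX queues; the iterator skips queues whose descriptor ring was
 * never allocated (ndesc == 0). "dev" is again a hypothetical driver
 * structure embedding struct mt76_dev as "mt76":
 *
 *	int i;
 *
 *	mt76_for_each_q_rx(&dev->mt76, i)
 *		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
 */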

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[qid] = q;

	return 0;
}
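
/*
 * Illustrative sketch (not part of the original header): a chip driver
 * commonly allocates one TX ring and points the remaining AC queues at it.
 * MT_TX_RING_SIZE and MT_TX_RING_BASE are hypothetical, chip-specific
 * values, and "dev" embeds struct mt76_phy as "mphy":
 *
 *	int i, err;
 *
 *	err = mt76_init_tx_queue(&dev->mphy, MT_TXQ_BE, 0, MT_TX_RING_SIZE,
 *				 MT_TX_RING_BASE, 0);
 *	if (err < 0)
 *		return err;
 *
 *	for (i = 1; i <= MT_TXQ_PSD; i++)
 *		dev->mphy.q_tx[i] = dev->mphy.q_tx[0];
 */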

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
	if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
	    (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
		return dev->phys[phy_idx];

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
	return mt76_dev_phy(dev, phy_idx)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of two */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
	u8 idx = nss - 1;

	return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];

		if (phy && skb == phy->test.tx_skb) {
			*hw = dev->phys[i]->hw;
			return true;
		}
	}
	return false;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		 __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		 __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
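
/*
 * Illustrative sketch of the TX-status tracking flow (hypothetical driver
 * code, not part of the original header): before queueing a frame, the
 * driver registers the skb to obtain a packet ID it can embed in the
 * hardware descriptor; when the firmware later reports TX status for that
 * ID, the skb is looked up and completed:
 *
 *	struct sk_buff_head list;
 *	int pid;
 *
 *	pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
 *	// ... write pid into the TXWI and queue the frame ...
 *
 *	// later, in the TXS event handler:
 *	mt76_tx_status_lock(&dev->mt76, &list);
 *	skb = mt76_tx_status_skb_get(&dev->mt76, wcid, pid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(&dev->mt76, skb, &list);
 *	mt76_tx_status_unlock(&dev->mt76, &list);
 */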

void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

/* sdio */
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}

void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}
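
/*
 * Illustrative sketch (not part of the original header): sending a firmware
 * command and consuming the reply. MCU_CMD_FOO and struct foo_req are
 * hypothetical; real command IDs and payload layouts are chip specific:
 *
 *	struct foo_req req = { .param = cpu_to_le32(val) };
 *	struct sk_buff *reply;
 *	int err;
 *
 *	err = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_FOO, &req,
 *					sizeof(req), true, &reply);
 *	if (err)
 *		return err;
 *	// ... parse the response payload ...
 *	dev_kfree_skb(reply);
 */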

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
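
/*
 * Illustrative sketch of the TX token lifecycle (hypothetical driver code,
 * not part of the original header): a token is allocated per in-flight
 * frame so the completion path can map a hardware "free" event back to its
 * TXWI:
 *
 *	int token = mt76_token_get(&dev->mt76, &txwi);
 *
 *	if (token < 0)
 *		return -ENOMEM;
 *	// ... write token into the TX descriptor and kick the queue ...
 *
 *	// later, on the TX-free event:
 *	txwi = mt76_token_put(&dev->mt76, token);
 *	if (txwi)
 *		mt76_put_txwi(&dev->mt76, txwi);
 */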

static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
{
	INIT_LIST_HEAD(&wcid->list);
	idr_init(&wcid->pktid);
}

static inline void
mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, -1, &list);
	mt76_tx_status_unlock(dev, &list);

	idr_destroy(&wcid->pktid);
}

#endif