htt.h 73 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459
  1. /* SPDX-License-Identifier: ISC */
  2. /*
  3. * Copyright (c) 2005-2011 Atheros Communications Inc.
  4. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  5. * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  6. */
  7. #ifndef _HTT_H_
  8. #define _HTT_H_
  9. #include <linux/bug.h>
  10. #include <linux/interrupt.h>
  11. #include <linux/dmapool.h>
  12. #include <linux/hashtable.h>
  13. #include <linux/kfifo.h>
  14. #include <net/mac80211.h>
  15. #include "htc.h"
  16. #include "hw.h"
  17. #include "rx_desc.h"
/* Bitmask of debug statistics the host can ask the target to upload or
 * reset via HTT_H2T_MSG_TYPE_STATS_REQ (see struct htt_stats_req, whose
 * upload_types/reset_types fields are 24-bit masks built from these bits).
 */
enum htt_dbg_stats_type {
	HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
	HTT_DBG_STATS_RX_REORDER = 1 << 1,
	HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
	HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
	HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
	/* bits 5-23 currently reserved */
	HTT_DBG_NUM_STATS /* keep this last */
};
/* Host-to-target HTT message type IDs. Values are explicitly numbered
 * (note the gap at 8-10), so treat them as fixed wire values shared with
 * firmware - do not renumber.
 */
enum htt_h2t_msg_type { /* host-to-target */
	HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
	HTT_H2T_MSG_TYPE_TX_FRM = 1,
	HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
	HTT_H2T_MSG_TYPE_STATS_REQ = 3,
	HTT_H2T_MSG_TYPE_SYNC = 4,
	HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
	HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,

	/* This command is used for sending management frames in HTT < 3.0.
	 * HTT >= 3.0 uses TX_FRM for everything.
	 */
	HTT_H2T_MSG_TYPE_MGMT_TX = 7,
	HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,

	HTT_H2T_NUM_MSGS /* keep this last */
};
/* One-byte header that starts every host->target HTT message */
struct htt_cmd_hdr {
	u8 msg_type; /* %HTT_H2T_MSG_TYPE_ */
} __packed;
/* Version request: carries no payload; pad[] merely rounds the message
 * (header + body) up to a full 4-byte word.
 */
struct htt_ver_req {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
} __packed;
/*
 * HTT tx MSDU descriptor
 *
 * The HTT tx MSDU descriptor is created by the host HTT SW for each
 * tx MSDU. The HTT tx MSDU descriptor contains the information that
 * the target firmware needs for the FW's tx processing, particularly
 * for creating the HW msdu descriptor.
 * The same HTT tx descriptor is used for HL and LL systems, though
 * a few fields within the tx descriptor are used only by LL or
 * only by HL.
 * The HTT tx descriptor is defined in two manners: by a struct with
 * bitfields, and by a series of [dword offset, bit mask, bit shift]
 * definitions.
 * The target should use the struct def, for simplicity and clarity,
 * but the host shall use the bit-mask + bit-shift defs, to be endian-
 * neutral. Specifically, the host shall use the get/set macros built
 * around the mask + shift defs.
 */
/* One scatter-gather fragment entry of a tx fragment list. Two layouts
 * share the same 8 bytes: a 32-bit address + 32-bit length pair, or a
 * 48-bit address (lo/hi split) + 16-bit length.
 */
struct htt_data_tx_desc_frag {
	union {
		/* 32-bit fragment physical address + length */
		struct double_word_addr {
			__le32 paddr;
			__le32 len;
		} __packed dword_addr;
		/* 48-bit fragment physical address + 16-bit length */
		struct triple_word_addr {
			__le32 paddr_lo;
			__le16 paddr_hi;
			__le16 len_16;
		} __packed tword_addr;
	} __packed;
} __packed;
/* Extended tx MSDU descriptor: TSO state, checksum-offload flags
 * (%HTT_MSDU_EXT_DESC_FLAG_) and an inline fragment list. A 64-bit
 * target variant follows below. Not marked __packed; all members are
 * naturally aligned at their offsets.
 */
struct htt_msdu_ext_desc {
	__le32 tso_flag[3];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};
/* 64-bit-target variant of htt_msdu_ext_desc; differs only in carrying
 * five tso_flag words instead of three.
 */
struct htt_msdu_ext_desc_64 {
	__le32 tso_flag[5];
	__le16 ip_identification;
	u8 flags;
	u8 reserved;
	struct htt_data_tx_desc_frag frags[6];
};
/* Checksum-offload enable flags for the msdu ext descriptors.
 * First group: bits 0-4, for struct htt_msdu_ext_desc.flags.
 * _64 group: bits 16-21.
 * NOTE(review): the _64 bits do not fit the u8 flags field of
 * htt_msdu_ext_desc_64 - presumably they are applied to a wider word
 * elsewhere; confirm against the tx path before relying on this.
 * NOTE(review): HTT_MSDU_CHECKSUM_ENABLE_64 omits the PARTIAL_CSUM bit;
 * confirm that is intentional.
 */
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)

/* union of all per-protocol checksum enable bits (32-bit descriptor) */
#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)

#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19)
#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20)
#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21)

/* union of the per-protocol bits for the 64-bit layout (no PARTIAL bit) */
#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
				    | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
/* flags0 field of the data tx descriptors: independent flag bits in
 * bits 0-4, packet type in bits 5-7 (extract with the PKT_TYPE
 * mask/lsb macros below).
 */
enum htt_data_tx_desc_flags0 {
	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
	HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
	HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
	HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
};
/* flags1 field of the data tx descriptors: vdev id in bits 0-5,
 * extended TID in bits 6-10 (see enum htt_data_tx_ext_tid), independent
 * flag bits in bits 11-15.
 */
enum htt_data_tx_desc_flags1 {
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
	HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
	HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
	HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
	HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15
};
/* Field extractors for the tx credit update word: bits 16-31 hold the
 * absolute credit delta, bit 8 the sign of that delta.
 */
#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000
#define HTT_TX_CREDIT_DELTA_ABS_S 16
#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
	(((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)

#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100
#define HTT_TX_CREDIT_SIGN_BIT_S 8
#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
	(((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
/* Extended TID values used when a frame has no regular QoS TID (0-15);
 * stored in the EXT_TID field of flags1.
 */
enum htt_data_tx_ext_tid {
	HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
	HTT_DATA_TX_EXT_TID_MGMT = 17,
	HTT_DATA_TX_EXT_TID_INVALID = 31
};

/* peer id value meaning "no valid peer" */
#define HTT_INVALID_PEERID 0xFFFF
/*
 * htt_data_tx_desc - used for data tx path
 *
 * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
 * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
 * for special kinds of tids
 * postponed: only for HL hosts. indicates if this is a resend
 * (HL hosts manage queues on the host)
 * more_in_batch: only for HL hosts. indicates if more packets are
 * pending. this allows the target to wait and aggregate
 * freq: 0 means home channel of given vdev. intended for offchannel
 */
struct htt_data_tx_desc {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le32 frags_paddr; /* presumably a htt_data_tx_desc_frag list - see frag struct above */
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq; /* 0 = home channel of the vdev */
		} __packed offchan_tx;
	} __packed;
	/* NOTE(review): zero-length trailing array (GNU extension); could be
	 * a C99 flexible array member ([]) but only if no other struct embeds
	 * this type non-terminally - confirm before changing.
	 */
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
/* 64-bit-target variant of htt_data_tx_desc: identical layout except
 * frags_paddr widens to 64 bits.
 */
struct htt_data_tx_desc_64 {
	u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
	__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
	__le16 len;
	__le16 id;
	__le64 frags_paddr; /* 64-bit fragment list address */
	union {
		__le32 peerid;
		struct {
			__le16 peerid;
			__le16 freq; /* 0 = home channel of the vdev */
		} __packed offchan_tx;
	} __packed;
	u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
/* Flags for the rx ring setup message (see htt_rx_ring_setup_ring*).
 * Bits 0-9 correspond to the rx descriptor sections whose offsets are
 * given in struct htt_rx_ring_rx_desc_offsets; bits 10-15 are named for
 * frame classes (unicast/multicast/ctrl/mgmt/null/phy-data).
 */
enum htt_rx_ring_flags {
	HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
	HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
	HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
	HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
	HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
	HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
	HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
	HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
	HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
	HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
	HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
	HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
	HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
	HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
	HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
	HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
};
/* Rx ring sizing: the driver uses the maximum ring size; default fill
 * level is half the ring minus one, the dual-mac configuration keeps
 * the ring nearly full (size - 1).
 */
#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
/* Tells the target where (as 4-byte-word offsets) to place each rx
 * descriptor section within an rx buffer; part of the rx ring setup.
 */
struct htt_rx_ring_rx_desc_offsets {
	/* the following offsets are in 4-byte units */
	__le16 mac80211_hdr_offset;
	__le16 msdu_payload_offset;
	__le16 ppdu_start_offset;
	__le16 ppdu_end_offset;
	__le16 mpdu_start_offset;
	__le16 mpdu_end_offset;
	__le16 msdu_start_offset;
	__le16 msdu_end_offset;
	__le16 rx_attention_offset;
	__le16 frag_info_offset;
} __packed;
/* Per-ring rx setup parameters, 32-bit DMA address flavor */
struct htt_rx_ring_setup_ring32 {
	__le32 fw_idx_shadow_reg_paddr; /* where FW mirrors its ring index */
	__le32 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
/* Per-ring rx setup parameters, 64-bit DMA address flavor */
struct htt_rx_ring_setup_ring64 {
	__le64 fw_idx_shadow_reg_paddr; /* where FW mirrors its ring index */
	__le64 rx_ring_base_paddr;
	__le16 rx_ring_len; /* in 4-byte words */
	__le16 rx_ring_bufsize; /* rx skb size - in bytes */
	__le16 flags; /* %HTT_RX_RING_FLAGS_ */
	__le16 fw_idx_init_val;

	struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
/* Header preceding the per-ring setup entries */
struct htt_rx_ring_setup_hdr {
	u8 num_rings; /* supported values: 1, 2 */
	__le16 rsvd0;
} __packed;
/* Complete rx ring setup message, 32-bit flavor; rings[] holds
 * hdr.num_rings entries.
 */
struct htt_rx_ring_setup_32 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring32 rings[];
} __packed;
/* Complete rx ring setup message, 64-bit flavor; rings[] holds
 * hdr.num_rings entries.
 */
struct htt_rx_ring_setup_64 {
	struct htt_rx_ring_setup_hdr hdr;
	struct htt_rx_ring_setup_ring64 rings[];
} __packed;
/*
 * htt_stats_req - request target to send specified statistics
 *
 * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
 * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit
 * field, so make sure it's little-endian.
 * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit
 * field, so make sure it's little-endian.
 * @cfg_val: stat_type specific configuration
 * @stat_type: see %htt_dbg_stats_type
 * @cookie_lsb: used for confirmation message from target->host
 * @cookie_msb: ditto as %cookie
 */
struct htt_stats_req {
	u8 upload_types[3]; /* 24-bit %htt_dbg_stats_type mask, little-endian */
	u8 rsvd0;
	u8 reset_types[3]; /* 24-bit %htt_dbg_stats_type mask, little-endian */
	/* stat_type specific configuration (cfg_val) */
	struct {
		u8 mpdu_bytes;
		u8 mpdu_num_msdus;
		u8 msdu_bytes;
	} __packed;
	u8 stat_type;
	__le32 cookie_lsb; /* echoed back in the target's confirmation */
	__le32 cookie_msb;
} __packed;

/* stat_type value meaning "no stat type configured" */
#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff

#define HTT_STATS_BIT_MASK GENMASK(16, 0)
  286. /*
  287. * htt_oob_sync_req - request out-of-band sync
  288. *
  289. * The HTT SYNC tells the target to suspend processing of subsequent
  290. * HTT host-to-target messages until some other target agent locally
  291. * informs the target HTT FW that the current sync counter is equal to
  292. * or greater than (in a modulo sense) the sync counter specified in
  293. * the SYNC message.
  294. *
  295. * This allows other host-target components to synchronize their operation
  296. * with HTT, e.g. to ensure that tx frames don't get transmitted until a
  297. * security key has been downloaded to and activated by the target.
  298. * In the absence of any explicit synchronization counter value
  299. * specification, the target HTT FW will use zero as the default current
  300. * sync value.
  301. *
  302. * The HTT target FW will suspend its host->target message processing as long
  303. * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
  304. */
/* Out-of-band sync request; semantics described in the comment above */
struct htt_oob_sync_req {
	u8 sync_count; /* target suspends H2T processing until caught up */
	__le16 rsvd0;
} __packed;
/* Aggregation limits sent with HTT_H2T_MSG_TYPE_AGGR_CFG */
struct htt_aggr_conf {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
} __packed;
/* v2 of htt_aggr_conf; adds one trailing reserved byte */
struct htt_aggr_conf_v2 {
	u8 max_num_ampdu_subframes;
	/* amsdu_subframes is limited by 0x1F mask */
	u8 max_num_amsdu_subframes;
	u8 reserved;
} __packed;

/* number of leading frame bytes copied into htt_mgmt_tx_desc.hdr */
#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
/* QCA99X0-specific extension of the management tx descriptor */
struct htt_mgmt_tx_desc_qca99x0 {
	__le32 rate;
} __packed;
/* Management frame tx descriptor (HTT_H2T_MSG_TYPE_MGMT_TX, HTT < 3.0) */
struct htt_mgmt_tx_desc {
	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)]; /* pad header to a 4-byte word */
	__le32 msdu_paddr; /* physical address of the frame buffer */
	__le32 desc_id; /* echoed in the tx completion */
	__le32 len;
	__le32 vdev_id;
	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN]; /* leading bytes of the frame */
	union {
		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
	} __packed;
} __packed;
/* Status codes reported in the management tx completion */
enum htt_mgmt_tx_status {
	HTT_MGMT_TX_STATUS_OK = 0,
	HTT_MGMT_TX_STATUS_RETRY = 1,
	HTT_MGMT_TX_STATUS_DROP = 2
};
  340. /*=== target -> host messages ===============================================*/
/* Target-to-host message IDs as numbered by "main" firmware. Explicit
 * values are wire ABI (note the gap at 0xc); do not renumber.
 */
enum htt_main_t2h_msg_type {
	HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
	HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
	HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
	HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
	HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
	HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
	HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
	HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
	HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
	HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
	HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
	HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
	HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
	HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
	HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
	HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
	HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
	HTT_MAIN_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_MAIN_T2H_NUM_MSGS
};
/* Target-to-host message IDs as numbered by 10.x firmware. Explicit
 * values are wire ABI (note the gap at 0x10); do not renumber.
 */
enum htt_10x_t2h_msg_type {
	HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
	HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
	HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
	HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
	HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
	HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
	HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
	HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
	HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
	HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
	HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
	HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
	HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
	HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
	HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
	HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
	HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
	HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
	HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
	/* keep this last */
	HTT_10X_T2H_NUM_MSGS
};
/* Target-to-host message IDs as numbered by TLV firmware. Explicit
 * values are wire ABI; do not renumber.
 */
enum htt_tlv_t2h_msg_type {
	HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
	HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
	HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
	HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
	HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
	HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
	HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
	HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
	HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
	HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
	HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
	HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
	HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
	HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
	HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
	HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
	HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
	HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
	HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
	/* 0x13 reserved */
	HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
	HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
	HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
	HTT_TLV_T2H_MSG_TYPE_TEST,
	/* keep this last */
	HTT_TLV_T2H_NUM_MSGS
};
/* Target-to-host message IDs as numbered by 10.4 firmware. Explicit
 * values are wire ABI; do not renumber.
 */
enum htt_10_4_t2h_msg_type {
	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0,
	HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1,
	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2,
	HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3,
	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5,
	HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6,
	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
	HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8,
	HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9,
	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
	HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb,
	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10,
	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11,
	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
	HTT_10_4_T2H_MSG_TYPE_TEST = 0x13,
	HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17,
	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
	/* 0x19 to 0x2f are reserved */
	HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
	HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31,
	/* keep this last */
	HTT_10_4_T2H_NUM_MSGS
};
/* Firmware-independent T2H message enumeration. Unlike the per-firmware
 * enums above these carry no explicit values, so they are NOT wire
 * values - presumably the host translates firmware-specific IDs into
 * this set (TODO confirm against the rx dispatch code).
 */
enum htt_t2h_msg_type {
	HTT_T2H_MSG_TYPE_VERSION_CONF,
	HTT_T2H_MSG_TYPE_RX_IND,
	HTT_T2H_MSG_TYPE_RX_FLUSH,
	HTT_T2H_MSG_TYPE_PEER_MAP,
	HTT_T2H_MSG_TYPE_PEER_UNMAP,
	HTT_T2H_MSG_TYPE_RX_ADDBA,
	HTT_T2H_MSG_TYPE_RX_DELBA,
	HTT_T2H_MSG_TYPE_TX_COMPL_IND,
	HTT_T2H_MSG_TYPE_PKTLOG,
	HTT_T2H_MSG_TYPE_STATS_CONF,
	HTT_T2H_MSG_TYPE_RX_FRAG_IND,
	HTT_T2H_MSG_TYPE_SEC_IND,
	HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
	HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
	HTT_T2H_MSG_TYPE_RX_PN_IND,
	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
	HTT_T2H_MSG_TYPE_CHAN_CHANGE,
	HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
	HTT_T2H_MSG_TYPE_AGGR_CONF,
	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
	HTT_T2H_MSG_TYPE_TEST,
	HTT_T2H_MSG_TYPE_EN_STATS,
	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
	HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
	HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
	HTT_T2H_MSG_TYPE_PEER_STATS,
	/* keep this last */
	HTT_T2H_NUM_MSGS
};
  480. /*
  481. * htt_resp_hdr - header for target-to-host messages
  482. *
  483. * msg_type: see htt_t2h_msg_type
  484. */
/* One-byte header that starts every target->host HTT message */
struct htt_resp_hdr {
	u8 msg_type; /* see htt_t2h_msg_type */
} __packed;

/* msg_type occupies the full first byte of a T2H message */
#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
#define HTT_RESP_HDR_MSG_TYPE_LSB 0
  491. /* htt_ver_resp - response sent for htt_ver_req */
/* htt_ver_resp - response sent for htt_ver_req */
struct htt_ver_resp {
	u8 minor; /* minor comes before major on the wire */
	u8 major;
	u8 rsvd0;
} __packed;
/* flags bit 0 signals that info carries the ack RSSI (low 8 bits) */
#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)

/* T2H completion for a management frame tx */
struct htt_mgmt_tx_completion {
	u8 rsvd0;
	u8 rsvd1;
	u8 flags; /* %HTT_MGMT_TX_CMPL_FLAG_ */
	__le32 desc_id; /* matches htt_mgmt_tx_desc.desc_id */
	__le32 status; /* %htt_mgmt_tx_status */
	__le32 ppdu_id;
	__le32 info; /* %HTT_MGMT_TX_CMPL_INFO_ */
} __packed;
/* Fields of htt_rx_indication_hdr.info0 (ext tid in bits 0-4, validity
 * flags above) and .info1 (flush/release sequence-number windows plus
 * the number of MPDU ranges in bits 24-31).
 */
#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)

#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0
#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000
#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24

/* Data tx completion flags and append-field selectors */
#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
#define HTT_TX_CMPL_FLAG_PPID_PRESENT BIT(1)
#define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2)
#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3)

#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
#define HTT_TX_DATA_APPEND_RETRIES BIT(0)
#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)
/* Leading header of an rx indication message */
struct htt_rx_indication_hdr {
	u8 info0; /* %HTT_RX_INDICATION_INFO0_ (ext tid + validity flags) */
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_INDICATION_INFO1_ (flush/release windows) */
} __packed;
/* Second set of INFO0/INFO1/INFO2 definitions: these describe the
 * per-PPDU words of struct htt_rx_indication_ppdu below, not the rx
 * indication header above (same macro prefix, different message part).
 */
#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1)
#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5)
#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7)

#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24

#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0
#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
/* Legacy (non-HT/VHT) rx rate codes. Note the OFDM and CCK codes share
 * the same numeric space (both start at 0); disambiguate with
 * enum htt_rx_legacy_rate_type / the LEGACY_RATE_CCK flag.
 */
enum htt_rx_legacy_rate {
	HTT_RX_OFDM_48 = 0,
	HTT_RX_OFDM_24 = 1,
	HTT_RX_OFDM_12,
	HTT_RX_OFDM_6,
	HTT_RX_OFDM_54,
	HTT_RX_OFDM_36,
	HTT_RX_OFDM_18,
	HTT_RX_OFDM_9,

	/* long preamble */
	HTT_RX_CCK_11_LP = 0,
	HTT_RX_CCK_5_5_LP = 1,
	HTT_RX_CCK_2_LP,
	HTT_RX_CCK_1_LP,
	/* short preamble */
	HTT_RX_CCK_11_SP,
	HTT_RX_CCK_5_5_SP,
	HTT_RX_CCK_2_SP
};
/* Selects which numeric space an htt_rx_legacy_rate value belongs to */
enum htt_rx_legacy_rate_type {
	HTT_RX_LEGACY_RATE_OFDM = 0,
	HTT_RX_LEGACY_RATE_CCK
};
/* Preamble type as reported in the rx PPDU info words
 * (%HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_)
 */
enum htt_rx_preamble_type {
	HTT_RX_LEGACY = 0x4,
	HTT_RX_HT = 0x8,
	HTT_RX_HT_WITH_TXBF = 0x9,
	HTT_RX_VHT = 0xC,
	HTT_RX_VHT_WITH_TXBF = 0xD,
};
  579. /*
  580. * Fields: phy_err_valid, phy_err_code, tsf,
  581. * usec_timestamp, sub_usec_timestamp
  582. * ..are valid only if end_valid == 1.
  583. *
  584. * Fields: rssi_chains, legacy_rate_type,
  585. * legacy_rate_cck, preamble_type, service,
  586. * vht_sig_*
  587. * ..are valid only if start_valid == 1;
  588. */
/* Per-PPDU metadata in the RX indication; see the validity comment
 * above — half the fields require end_valid, the other half
 * start_valid.
 */
struct htt_rx_indication_ppdu {
	u8 combined_rssi;
	u8 sub_usec_timestamp;
	u8 phy_err_code;
	u8 info0; /* HTT_RX_INDICATION_INFO0_ */
	/* per-chain RSSI, one entry per bandwidth segment */
	struct {
		u8 pri20_db;
		u8 ext20_db;
		u8 ext40_db;
		u8 ext80_db;
	} __packed rssi_chains[4];
	__le32 tsf;
	__le32 usec_timestamp;
	__le32 info1; /* HTT_RX_INDICATION_INFO1_ */
	__le32 info2; /* HTT_RX_INDICATION_INFO2_ */
} __packed;
/* Per-MPDU-range RX status reported by the target
 * (%htt_rx_indication_mpdu_range.mpdu_range_status).
 */
enum htt_rx_mpdu_status {
	HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
	HTT_RX_IND_MPDU_STATUS_OK,
	HTT_RX_IND_MPDU_STATUS_ERR_FCS,
	HTT_RX_IND_MPDU_STATUS_ERR_DUP,
	HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
	HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
	/* only accept EAPOL frames */
	HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
	HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
	/* Non-data in promiscuous mode */
	HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
	HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
	HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
	HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
	HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
	HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,

	/*
	 * MISC: discard for unspecified reasons.
	 * Leave this enum value last.
	 */
	HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
};
/* One contiguous run of MPDUs sharing the same RX status. */
struct htt_rx_indication_mpdu_range {
	u8 mpdu_count;
	u8 mpdu_range_status; /* %htt_rx_mpdu_status */
	u8 pad0;
	u8 pad1;
} __packed;

/* Fixed prefix preceding the variable-length fw_rx_desc area. */
struct htt_rx_indication_prefix {
	__le16 fw_rx_desc_bytes;
	u8 pad0;
	u8 pad1;
};

/* Low-latency RX indication message layout. */
struct htt_rx_indication {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;

	/*
	 * the following fields are both dynamically sized, so
	 * take care addressing them
	 */

	/* the size of this is %fw_rx_desc_bytes */
	struct fw_rx_desc_base fw_desc;

	/*
	 * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
	 * and has %num_mpdu_ranges elements.
	 */
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;

/* High latency version of the RX indication */
struct htt_rx_indication_hl {
	struct htt_rx_indication_hdr hdr;
	struct htt_rx_indication_ppdu ppdu;
	struct htt_rx_indication_prefix prefix;
	struct fw_rx_desc_hl fw_desc; /* fixed size, unlike fw_rx_desc_base */
	struct htt_rx_indication_mpdu_range mpdu_ranges[];
} __packed;
/* High-latency RX descriptor carrying the (up to 128-bit) packet
 * number split into 32-bit little-endian words; bits 63:32 can also
 * be read as two 16-bit halves via the union.
 */
struct htt_hl_rx_desc {
	__le32 info;
	__le32 pn_31_0;
	union {
		struct {
			__le16 pn_47_32;
			__le16 pn_63_48;
		} pn16;
		__le32 pn_63_32;
	} u0;
	__le32 pn_95_64;
	__le32 pn_127_96;
} __packed;
  676. static inline struct htt_rx_indication_mpdu_range *
  677. htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
  678. {
  679. void *ptr = rx_ind;
  680. ptr += sizeof(rx_ind->hdr)
  681. + sizeof(rx_ind->ppdu)
  682. + sizeof(rx_ind->prefix)
  683. + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
  684. return ptr;
  685. }
  686. static inline struct htt_rx_indication_mpdu_range *
  687. htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
  688. {
  689. void *ptr = rx_ind;
  690. ptr += sizeof(rx_ind->hdr)
  691. + sizeof(rx_ind->ppdu)
  692. + sizeof(rx_ind->prefix)
  693. + sizeof(rx_ind->fw_desc);
  694. return ptr;
  695. }
/* What to do with MPDUs held in the reorder buffer on flush. */
enum htt_rx_flush_mpdu_status {
	HTT_RX_FLUSH_MPDU_DISCARD = 0,
	HTT_RX_FLUSH_MPDU_REORDER = 1,
};

/*
 * htt_rx_flush - discard or reorder given range of mpdus
 *
 * Note: host must check if all sequence numbers between
 * [seq_num_start, seq_num_end-1] are valid.
 */
struct htt_rx_flush {
	__le16 peer_id;
	u8 tid;
	u8 rsvd0;
	u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
	u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
	u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
};
/* Target notifies the host of a new (vdev, MAC address) -> peer_id
 * association.
 */
struct htt_rx_peer_map {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6]; /* peer MAC address */
	u8 rsvd0;
	u8 rsvd1;
} __packed;

/* Target notifies the host that a peer_id is no longer valid. */
struct htt_rx_peer_unmap {
	u8 rsvd0;
	__le16 peer_id;
} __packed;

enum htt_txrx_sec_cast_type {
	HTT_TXRX_SEC_MCAST = 0,
	HTT_TXRX_SEC_UCAST
};

enum htt_rx_pn_check_type {
	HTT_RX_NON_PN_CHECK = 0,
	HTT_RX_PN_CHECK
};

enum htt_rx_tkip_demic_type {
	HTT_RX_NON_TKIP_MIC = 0,
	HTT_RX_TKIP_MIC
};

/* Cipher suite in use for a peer/key. */
enum htt_security_types {
	HTT_SECURITY_NONE,
	HTT_SECURITY_WEP128,
	HTT_SECURITY_WEP104,
	HTT_SECURITY_WEP40,
	HTT_SECURITY_TKIP,
	HTT_SECURITY_TKIP_NOMIC,
	HTT_SECURITY_AES_CCMP,
	HTT_SECURITY_WAPI,

	HTT_NUM_SECURITY_TYPES /* keep this last! */
};
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
#define ATH10K_TXRX_NON_QOS_TID 16

/* Layout of %htt_security_indication.flags: bits 6:0 carry the
 * %htt_security_types value, bit 7 marks the unicast key.
 */
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
#define HTT_SECURITY_TYPE_LSB 0
	HTT_SECURITY_IS_UNICAST = 1 << 7
};

/* Target informs the host that a security key has been installed
 * for a peer.
 */
struct htt_security_indication {
	union {
		/* dont use bitfields; undefined behaviour */
		u8 flags; /* %htt_security_flags */
		struct {
			u8 security_type:7, /* %htt_security_types */
			   is_unicast:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	u8 michael_key[8];
	u8 wapi_rsc[16];
} __packed;
/* %htt_rx_addba/%htt_rx_delba info0 fields */
#define HTT_RX_BA_INFO0_TID_MASK 0x000F
#define HTT_RX_BA_INFO0_TID_LSB 0
#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
#define HTT_RX_BA_INFO0_PEER_ID_LSB 4

/* Target notifies the host that a block-ack session was added. */
struct htt_rx_addba {
	u8 window_size;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

/* Target notifies the host that a block-ack session was torn down. */
struct htt_rx_delba {
	u8 rsvd0;
	__le16 info0; /* %HTT_RX_BA_INFO0_ */
} __packed;

/* Per-MSDU TX completion status (see %htt_data_tx_completion). */
enum htt_data_tx_status {
	HTT_DATA_TX_STATUS_OK = 0,
	HTT_DATA_TX_STATUS_DISCARD = 1,
	HTT_DATA_TX_STATUS_NO_ACK = 2,
	HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */
	HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
};

/* Layout of %htt_data_tx_completion.flags: status in bits 2:0,
 * tid in bits 6:3, tid-invalid flag in bit 7.
 */
enum htt_data_tx_flags {
#define HTT_DATA_TX_STATUS_MASK 0x07
#define HTT_DATA_TX_STATUS_LSB 0
#define HTT_DATA_TX_TID_MASK 0x78
#define HTT_DATA_TX_TID_LSB 3
	HTT_DATA_TX_TID_INVALID = 1 << 7
};
/* Sentinel MSDU id marking an unused payload slot. */
#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF

/* Optional per-MSDU retry-count record appended to a TX completion
 * (present when the A0/append bit is set; see the big comment below).
 */
struct htt_append_retries {
	__le16 msdu_id;
	u8 tx_retries;
	u8 flag; /* %HTT_TX_DATA_APPEND_ */
} __packed;

/* Optional trailer of a TX completion: retries record, timestamp and
 * per-MSDU ACK RSSI values.
 */
struct htt_data_tx_completion_ext {
	struct htt_append_retries a_retries;
	__le32 t_stamp;
	__le16 msdus_rssi[]; /* one entry per completed MSDU */
} __packed;
  806. /**
  807. * @brief target -> host TX completion indication message definition
  808. *
  809. * @details
  810. * The following diagram shows the format of the TX completion indication sent
  811. * from the target to the host
  812. *
  813. * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0|
  814. * |-------------------------------------------------------------|
  815. * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type |
  816. * |-------------------------------------------------------------|
  817. * payload: | MSDU1 ID | MSDU0 ID |
  818. * |-------------------------------------------------------------|
  819. * : MSDU3 ID : MSDU2 ID :
  820. * |-------------------------------------------------------------|
  821. * | struct htt_tx_compl_ind_append_retries |
  822. * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
  823. * | struct htt_tx_compl_ind_append_tx_tstamp |
  824. * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
  825. * | MSDU1 ACK RSSI | MSDU0 ACK RSSI |
  826. * |-------------------------------------------------------------|
  827. * : MSDU3 ACK RSSI : MSDU2 ACK RSSI :
  828. * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
  829. * -msg_type
  830. * Bits 7:0
  831. * Purpose: identifies this as HTT TX completion indication
  832. * -status
  833. * Bits 10:8
  834. * Purpose: the TX completion status of payload fragmentations descriptors
  835. * Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
  836. * -tid
  837. * Bits 14:11
  838. * Purpose: the tid associated with those fragmentation descriptors. It is
  839. * valid or not, depending on the tid_invalid bit.
  840. * Value: 0 to 15
  841. * -tid_invalid
  842. * Bits 15:15
  843. * Purpose: this bit indicates whether the tid field is valid or not
  844. * Value: 0 indicates valid, 1 indicates invalid
  845. * -num
  846. * Bits 23:16
  847. * Purpose: the number of payload in this indication
  848. * Value: 1 to 255
  849. * -A0 = append
  850. * Bits 24:24
  851. * Purpose: append the struct htt_tx_compl_ind_append_retries which contains
  852. * the number of tx retries for one MSDU at the end of this message
  853. * Value: 0 indicates no appending, 1 indicates appending
  854. * -A1 = append1
  855. * Bits 25:25
  856. * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
  857. * contains the timestamp info for each TX msdu id in payload.
  858. * Value: 0 indicates no appending, 1 indicates appending
  859. * -TP = MSDU tx power presence
  860. * Bits 26:26
  861. * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
  862. * for each MSDU referenced by the TX_COMPL_IND message.
  863. * The order of the per-MSDU tx power reports matches the order
  864. * of the MSDU IDs.
  865. * Value: 0 indicates not appending, 1 indicates appending
  866. * -A2 = append2
  867. * Bits 27:27
  868. * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
  869. * TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
  870. * matches the order of the MSDU IDs.
  871. * The ACK RSSI values are valid when status is COMPLETE_OK (and
  872. * this append2 bit is set).
  873. * Value: 0 indicates not appending, 1 indicates appending
  874. */
/* TX completion indication body; field layout is documented in the
 * large comment block above.
 */
struct htt_data_tx_completion {
	union {
		u8 flags; /* %HTT_DATA_TX_STATUS_ / %HTT_DATA_TX_TID_ */
		struct {
			u8 status:3,
			   tid:4,
			   tid_invalid:1;
		} __packed;
	} __packed;
	u8 num_msdus;
	u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
	__le16 msdus[]; /* variable length based on %num_msdus */
} __packed;
/* %htt_data_tx_ppdu_dur.info0 fields */
#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0)
#define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16)

/* Per-(peer, tid) on-air duration report. */
struct htt_data_tx_ppdu_dur {
	__le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
	__le32 tx_duration; /* in usecs */
} __packed;

#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0)

/* PPDU-duration trailer of a TX completion; entry count is in info0. */
struct htt_data_tx_compl_ppdu_dur {
	__le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
	struct htt_data_tx_ppdu_dur ppdu_dur[];
} __packed;

/* Generic view of a TX completion: 32-bit header word followed by
 * 16-bit MSDU id payload entries.
 */
struct htt_tx_compl_ind_base {
	u32 hdr;
	u16 payload[1/*or more*/];
} __packed;
/* Rate-control outcome for one (A-)MPDU transmission attempt. */
struct htt_rc_tx_done_params {
	u32 rate_code;
	u32 rate_code_flags;
	u32 flags;
	u32 num_enqued; /* 1 for non-AMPDU */
	u32 num_retries;
	u32 num_failed; /* for AMPDU */
	u32 ack_rssi;
	u32 time_stamp;
	u32 is_probe;
};

/* Target -> host rate-control update for a peer. */
struct htt_rc_update {
	u8 vdev_id;
	__le16 peer_id;
	u8 addr[6]; /* peer MAC address */
	u8 num_elems;
	u8 rsvd0;
	struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
} __packed;
/* see htt_rx_indication for similar fields and descriptions */
struct htt_rx_fragment_indication {
	union {
		u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
		struct {
			u8 ext_tid:5,
			   flush_valid:1;
		} __packed;
	} __packed;
	__le16 peer_id;
	__le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
	__le16 fw_rx_desc_bytes;
	__le16 rsvd0;
	u8 fw_msdu_rx_desc[]; /* %fw_rx_desc_bytes long */
} __packed;

#define ATH10K_IEEE80211_EXTIV BIT(5)
#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */

#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16

/* %htt_rx_fragment_indication.info0 fields */
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5

/* %htt_rx_fragment_indication.info1 fields */
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6

/* Target -> host PN (packet number) check request for a range of
 * sequence numbers.
 */
struct htt_rx_pn_ind {
	__le16 peer_id;
	u8 tid;
	u8 seqno_start;
	u8 seqno_end;
	u8 pn_ie_count;
	u8 reserved;
	u8 pn_ies[]; /* %pn_ie_count entries */
} __packed;
/* One offloaded MSDU delivered inline in an offload indication. */
struct htt_rx_offload_msdu {
	__le16 msdu_len;
	__le16 peer_id;
	u8 vdev_id;
	u8 tid;
	u8 fw_desc;
	u8 payload[]; /* %msdu_len bytes of frame data */
} __packed;

/* Header of the offload-deliver indication; followed by %msdu_count
 * htt_rx_offload_msdu entries.
 */
struct htt_rx_offload_ind {
	u8 reserved;
	__le16 msdu_count;
} __packed;

/* In-order RX descriptor, 32-bit paddr variant. */
struct htt_rx_in_ord_msdu_desc {
	__le32 msdu_paddr;
	__le16 msdu_len;
	u8 fw_desc;
	u8 reserved;
} __packed;

/* In-order RX descriptor, 64-bit paddr variant (targets with wide
 * DMA addressing).
 */
struct htt_rx_in_ord_msdu_desc_ext {
	__le64 msdu_paddr;
	__le16 msdu_len;
	u8 fw_desc;
	u8 reserved;
} __packed;
  981. struct htt_rx_in_ord_ind {
  982. u8 info;
  983. __le16 peer_id;
  984. u8 vdev_id;
  985. u8 reserved;
  986. __le16 msdu_count;
  987. union {
  988. struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
  989. struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
  990. } __packed;
  991. } __packed;
/* %htt_rx_in_ord_ind.info fields */
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
  998. /*
  999. * target -> host test message definition
  1000. *
  1001. * The following field definitions describe the format of the test
  1002. * message sent from the target to the host.
  1003. * The message consists of a 4-octet header, followed by a variable
  1004. * number of 32-bit integer values, followed by a variable number
  1005. * of 8-bit character values.
  1006. *
  1007. * |31 16|15 8|7 0|
  1008. * |-----------------------------------------------------------|
  1009. * | num chars | num ints | msg type |
  1010. * |-----------------------------------------------------------|
  1011. * | int 0 |
  1012. * |-----------------------------------------------------------|
  1013. * | int 1 |
  1014. * |-----------------------------------------------------------|
  1015. * | ... |
  1016. * |-----------------------------------------------------------|
  1017. * | char 3 | char 2 | char 1 | char 0 |
  1018. * |-----------------------------------------------------------|
  1019. * | | | ... | char 4 |
  1020. * |-----------------------------------------------------------|
  1021. * - MSG_TYPE
  1022. * Bits 7:0
  1023. * Purpose: identifies this as a test message
  1024. * Value: HTT_MSG_TYPE_TEST
  1025. * - NUM_INTS
  1026. * Bits 15:8
  1027. * Purpose: indicate how many 32-bit integers follow the message header
  1028. * - NUM_CHARS
  1029. * Bits 31:16
  1030. * Purpose: indicate how many 8-bit characters follow the series of integers
  1031. */
/* Body of the target -> host test message described above; the
 * msg_type octet of the 4-byte header precedes this struct.
 */
struct htt_rx_test {
	u8 num_ints;
	__le16 num_chars;

	/* payload consists of 2 lists:
	 *  a) num_ints * sizeof(__le32)
	 *  b) num_chars * sizeof(u8) aligned to 4bytes
	 */
	u8 payload[];
} __packed;
  1041. static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
  1042. {
  1043. return (__le32 *)rx_test->payload;
  1044. }
  1045. static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
  1046. {
  1047. return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
  1048. }
  1049. /*
  1050. * target -> host packet log message
  1051. *
  1052. * The following field definitions describe the format of the packet log
  1053. * message sent from the target to the host.
  1054. * The message consists of a 4-octet header,followed by a variable number
  1055. * of 32-bit character values.
  1056. *
  1057. * |31 24|23 16|15 8|7 0|
  1058. * |-----------------------------------------------------------|
  1059. * | | | | msg type |
  1060. * |-----------------------------------------------------------|
  1061. * | payload |
  1062. * |-----------------------------------------------------------|
  1063. * - MSG_TYPE
  1064. * Bits 7:0
 * Purpose: identifies this as a packet log message
  1066. * Value: HTT_MSG_TYPE_PACKETLOG
  1067. */
/* Body of the packet log message described above; 3 pad bytes
 * complete the 4-octet header before the variable payload.
 */
struct htt_pktlog_msg {
	u8 pad[3];
	u8 payload[];
} __packed;
/* Firmware debug stats: RX reorder counters. */
struct htt_dbg_stats_rx_reorder_stats {
	/* Non QoS MPDUs received */
	__le32 deliver_non_qos;

	/* MPDUs received in-order */
	__le32 deliver_in_order;

	/* Flush due to reorder timer expired */
	__le32 deliver_flush_timeout;

	/* Flush due to move out of window */
	__le32 deliver_flush_oow;

	/* Flush due to DELBA */
	__le32 deliver_flush_delba;

	/* MPDUs dropped due to FCS error */
	__le32 fcs_error;

	/* MPDUs dropped due to monitor mode non-data packet */
	__le32 mgmt_ctrl;

	/* MPDUs dropped due to invalid peer */
	__le32 invalid_peer;

	/* MPDUs dropped due to duplication (non aggregation) */
	__le32 dup_non_aggr;

	/* MPDUs dropped due to processed before */
	__le32 dup_past;

	/* MPDUs dropped due to duplicate in reorder queue */
	__le32 dup_in_reorder;

	/* Reorder timeout happened */
	__le32 reorder_timeout;

	/* invalid bar ssn */
	__le32 invalid_bar_ssn;

	/* reorder reset due to bar ssn */
	__le32 ssn_reset;
};
/* Firmware debug stats: WAL (wireless abstraction layer) TX counters. */
struct htt_dbg_stats_wal_tx_stats {
	/* Num HTT cookies queued to dispatch list */
	__le32 comp_queued;

	/* Num HTT cookies dispatched */
	__le32 comp_delivered;

	/* Num MSDU queued to WAL */
	__le32 msdu_enqued;

	/* Num MPDU queue to WAL */
	__le32 mpdu_enqued;

	/* Num MSDUs dropped by WMM limit */
	__le32 wmm_drop;

	/* Num Local frames queued */
	__le32 local_enqued;

	/* Num Local frames done */
	__le32 local_freed;

	/* Num queued to HW */
	__le32 hw_queued;

	/* Num PPDU reaped from HW */
	__le32 hw_reaped;

	/* Num underruns */
	__le32 underrun;

	/* Num PPDUs cleaned up in TX abort */
	__le32 tx_abort;

	/* Num MPDUs requeued by SW */
	__le32 mpdus_requeued;

	/* excessive retries */
	__le32 tx_ko;

	/* data hw rate code */
	__le32 data_rc;

	/* Scheduler self triggers */
	__le32 self_triggers;

	/* frames dropped due to excessive sw retries */
	__le32 sw_retry_failure;

	/* illegal rate phy errors */
	__le32 illgl_rate_phy_err;

	/* wal pdev continuous xretry */
	__le32 pdev_cont_xretry;

	/* wal pdev continuous xretry */
	__le32 pdev_tx_timeout;

	/* wal pdev resets */
	__le32 pdev_resets;

	__le32 phy_underrun;

	/* MPDU is more than txop limit */
	__le32 txop_ovf;
} __packed;
/* Firmware debug stats: WAL RX counters. */
struct htt_dbg_stats_wal_rx_stats {
	/* Cnts any change in ring routing mid-ppdu */
	__le32 mid_ppdu_route_change;

	/* Total number of statuses processed */
	__le32 status_rcvd;

	/* Extra frags on rings 0-3 */
	__le32 r0_frags;
	__le32 r1_frags;
	__le32 r2_frags;
	__le32 r3_frags;

	/* MSDUs / MPDUs delivered to HTT */
	__le32 htt_msdus;
	__le32 htt_mpdus;

	/* MSDUs / MPDUs delivered to local stack */
	__le32 loc_msdus;
	__le32 loc_mpdus;

	/* AMSDUs that have more MSDUs than the status ring size */
	__le32 oversize_amsdu;

	/* Number of PHY errors */
	__le32 phy_errs;

	/* Number of PHY errors drops */
	__le32 phy_err_drop;

	/* Number of mpdu errors - FCS, MIC, ENC etc. */
	__le32 mpdu_errs;
} __packed;
/* Firmware debug stats: per-peer placeholder. */
struct htt_dbg_stats_wal_peer_stats {
	__le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
} __packed;

/* Aggregate of all WAL pdev TX/RX/peer stats. */
struct htt_dbg_stats_wal_pdev_txrx {
	struct htt_dbg_stats_wal_tx_stats tx_stats;
	struct htt_dbg_stats_wal_rx_stats rx_stats;
	struct htt_dbg_stats_wal_peer_stats peer_stats;
} __packed;

/* RX rate histogram counters, bucketed by MCS/SGI/NSS/STBC/BW/preamble. */
struct htt_dbg_stats_rx_rate_info {
	__le32 mcs[10];
	__le32 sgi[10];
	__le32 nss[4];
	__le32 stbc[10];
	__le32 bw[3];
	__le32 pream[6];
	__le32 ldpc;
	__le32 txbf;
};
  1190. /*
  1191. * htt_dbg_stats_status -
  1192. * present - The requested stats have been delivered in full.
  1193. * This indicates that either the stats information was contained
  1194. * in its entirety within this message, or else this message
  1195. * completes the delivery of the requested stats info that was
  1196. * partially delivered through earlier STATS_CONF messages.
  1197. * partial - The requested stats have been delivered in part.
  1198. * One or more subsequent STATS_CONF messages with the same
  1199. * cookie value will be sent to deliver the remainder of the
  1200. * information.
  1201. * error - The requested stats could not be delivered, for example due
  1202. * to a shortage of memory to construct a message holding the
  1203. * requested stats.
  1204. * invalid - The requested stat type is either not recognized, or the
  1205. * target is configured to not gather the stats type in question.
  1206. * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  1207. * series_done - This special value indicates that no further stats info
  1208. * elements are present within a series of stats info elems
  1209. * (within a stats upload confirmation message).
  1210. */
/* Per-element delivery status in a STATS_CONF message; semantics are
 * documented in the comment above.
 */
enum htt_dbg_stats_status {
	HTT_DBG_STATS_STATUS_PRESENT = 0,
	HTT_DBG_STATS_STATUS_PARTIAL = 1,
	HTT_DBG_STATS_STATUS_ERROR = 2,
	HTT_DBG_STATS_STATUS_INVALID = 3,
	HTT_DBG_STATS_STATUS_SERIES_DONE = 7
};
  1218. /*
  1219. * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
  1220. *
  1221. * The following field definitions describe the format of the HTT host
  1222. * to target frag_desc/msdu_ext bank configuration message.
  1223. * The message contains the based address and the min and max id of the
  1224. * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
  1225. * MSDU_EXT/FRAG_DESC.
  1226. * HTT will use id in HTT descriptor instead sending the frag_desc_ptr.
  1227. * For QCA988X HW the firmware will use fragment_desc_ptr but in WIFI2.0
  1228. * the hardware does the mapping/translation.
  1229. *
  1230. * Total banks that can be configured is configured to 16.
  1231. *
  1232. * This should be called before any TX has be initiated by the HTT
  1233. *
  1234. * |31 16|15 8|7 5|4 0|
  1235. * |------------------------------------------------------------|
  1236. * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
  1237. * |------------------------------------------------------------|
  1238. * | BANK0_BASE_ADDRESS |
  1239. * |------------------------------------------------------------|
  1240. * | ... |
  1241. * |------------------------------------------------------------|
  1242. * | BANK15_BASE_ADDRESS |
  1243. * |------------------------------------------------------------|
  1244. * | BANK0_MAX_ID | BANK0_MIN_ID |
  1245. * |------------------------------------------------------------|
  1246. * | ... |
  1247. * |------------------------------------------------------------|
  1248. * | BANK15_MAX_ID | BANK15_MIN_ID |
  1249. * |------------------------------------------------------------|
  1250. * Header fields:
  1251. * - MSG_TYPE
  1252. * Bits 7:0
  1253. * Value: 0x6
  1254. * - BANKx_BASE_ADDRESS
  1255. * Bits 31:0
  1256. * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
  1257. * bank physical/bus address.
  1258. * - BANKx_MIN_ID
  1259. * Bits 15:0
  1260. * Purpose: Provide a mechanism to specify the min index that needs to
  1261. * mapped.
  1262. * - BANKx_MAX_ID
  1263. * Bits 31:16
 * Purpose: Provide a mechanism to specify the max index that needs to
 *      be mapped.
  1266. */
/* Min/max fragment-descriptor index range covered by one bank. */
struct htt_frag_desc_bank_id {
	__le16 bank_min_id;
	__le16 bank_max_id;
} __packed;

/* real is 16 but it wouldn't fit in the max htt message size
 * so we use a conservatively safe value for now
 */
#define HTT_FRAG_DESC_BANK_MAX 4

/* %htt_frag_desc_bank_cfg32/64.info fields */
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4

/* How the host queue depth values are to be interpreted. */
enum htt_q_depth_type {
	HTT_Q_DEPTH_TYPE_BYTES = 0,
	HTT_Q_DEPTH_TYPE_MSDUS = 1,
};

#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
				  TARGET_10_4_NUM_VDEVS)
#define HTT_TX_Q_STATE_NUM_TIDS 8
#define HTT_TX_Q_STATE_ENTRY_SIZE 1
#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
  1290. /**
  1291. * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
  1292. *
  1293. * Defines host q state format and behavior. See htt_q_state.
  1294. *
  1295. * @record_size: Defines the size of each host q entry in bytes. In practice
  1296. * however firmware (at least 10.4.3-00191) ignores this host
  1297. * configuration value and uses hardcoded value of 1.
  1298. * @record_multiplier: This is valid only when q depth type is MSDUs. It
  1299. * defines the exponent for the power of 2 multiplication.
  1300. */
/* See the kernel-doc comment above for field semantics. */
struct htt_q_state_conf {
	__le32 paddr; /* DMA address of the shared htt_q_state */
	__le16 num_peers;
	__le16 num_tids;
	u8 record_size;
	u8 record_multiplier;
	u8 pad[2];
} __packed;

/* Frag-desc bank configuration, 32-bit bank address variant. */
struct htt_frag_desc_bank_cfg32 {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
	struct htt_q_state_conf q_state;
} __packed;

/* Frag-desc bank configuration, 64-bit bank address variant. */
struct htt_frag_desc_bank_cfg64 {
	u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
	u8 num_banks;
	u8 desc_size;
	__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
	struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
	struct htt_q_state_conf q_state;
} __packed;

/* Encoding of one htt_q_state.count byte when depth type is BYTES:
 * depth = FACTOR * 128 * 8^EXP.
 */
#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0
#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
  1330. /**
  1331. * htt_q_state - shared between host and firmware via DMA
  1332. *
 * This structure is used for the host to expose its software queue state to
  1334. * firmware so that its rate control can schedule fetch requests for optimized
  1335. * performance. This is most notably used for MU-MIMO aggregation when multiple
  1336. * MU clients are connected.
  1337. *
  1338. * @count: Each element defines the host queue depth. When q depth type was
  1339. * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
  1340. * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
  1341. * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
  1342. * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
  1343. * record_multiplier (see htt_q_state_conf).
 * @map: Used by firmware to quickly check which host queues are not empty.
 *	It is a bitmap with one bit per (TID, peer) queue; a set bit means
 *	the corresponding queue is non-empty.
  1346. * @seq: Used by firmware to quickly check if the host queues were updated
  1347. * since it last checked.
  1348. *
  1349. * FIXME: Is the q_state map[] size calculation really correct?
  1350. */
/* DMA-shared queue-state block; see the kernel-doc comment above. */
struct htt_q_state {
	u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
	u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
	__le32 seq;
} __packed;
/* %htt_tx_fetch_record.info fields */
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff
#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0
#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000
#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12

/* One (peer, tid) fetch request: how many MSDUs/bytes firmware wants. */
struct htt_tx_fetch_record {
	__le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
	__le16 num_msdus;
	__le32 num_bytes;
} __packed;

/* Target -> host fetch indication (push-pull TX mode).  The trailing
 * area holds %num_records fetch records followed by %num_resp_ids
 * response ids.
 */
struct htt_tx_fetch_ind {
	u8 pad0;
	__le16 fetch_seq_num;
	__le32 token;
	__le16 num_resp_ids;
	__le16 num_records;
	union {
		/* ath10k_htt_get_tx_fetch_ind_resp_ids() */
		DECLARE_FLEX_ARRAY(__le32, resp_ids);
		DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
	};
} __packed;
  1377. static inline void *
  1378. ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
  1379. {
  1380. return (void *)&ind->records[le16_to_cpu(ind->num_records)];
  1381. }
/* Host -> target response to a fetch indication. */
struct htt_tx_fetch_resp {
	u8 pad0;
	__le16 resp_id;
	__le16 fetch_seq_num;
	__le16 num_records;
	__le32 token;
	struct htt_tx_fetch_record records[]; /* %num_records entries */
} __packed;

/* Target -> host confirmation of consumed fetch responses. */
struct htt_tx_fetch_confirm {
	u8 pad0;
	__le16 num_resp_ids;
	__le32 resp_ids[]; /* %num_resp_ids entries */
} __packed;
/* TX scheduling mode requested by firmware. */
enum htt_tx_mode_switch_mode {
	HTT_TX_MODE_SWITCH_PUSH = 0,
	HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
};

/* %htt_tx_mode_switch_ind.info0 fields */
#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0)
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe
#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1

/* %htt_tx_mode_switch_ind.info1 fields */
#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003
#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc
#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2

/* %htt_tx_mode_switch_record.info0 fields */
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000
#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12

/* Per-(peer, tid) MSDU limit accompanying a mode switch. */
struct htt_tx_mode_switch_record {
	__le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
	__le16 num_max_msdus;
} __packed;

/* Target -> host request to switch between push and push-pull TX. */
struct htt_tx_mode_switch_ind {
	u8 pad0;
	__le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
	__le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
	u8 pad1[2];
	struct htt_tx_mode_switch_record records[]; /* count is in info0 */
} __packed;
/* Target -> host notification of a channel change; frequencies are
 * presumably in MHz — TODO confirm against firmware interface docs.
 */
struct htt_channel_change {
	u8 pad[3];
	__le32 freq;
	__le32 center_freq1;
	__le32 center_freq2;
	__le32 phymode;
} __packed;
/* Per-peer tx statistics record carried inside an htt_peer_tx_stats
 * payload. Wire format, __packed.
 */
struct htt_per_peer_tx_stats_ind {
	__le32 succ_bytes;
	__le32 retry_bytes;
	__le32 failed_bytes;
	u8 ratecode;
	u8 flags;
	__le16 peer_id;
	__le16 succ_pkts;
	__le16 retry_pkts;
	__le16 failed_pkts;
	__le16 tx_duration;
	__le32 reserved1;
	__le32 reserved2;
} __packed;
/* Target -> host peer tx stats message header; payload[] holds
 * num_ppdu entries of ppdu_len bytes each (format per version).
 */
struct htt_peer_tx_stats {
	u8 num_ppdu;
	u8 ppdu_len;
	u8 version;
	u8 payload[];
} __packed;
/* Byte offset of the tx stats blob within a 10.2 firmware event. */
#define ATH10K_10_2_TX_STATS_OFFSET 136
#define PEER_STATS_FOR_NO_OF_PPDUS 4

/* Peer tx stats layout used by 10.2 firmware: parallel arrays holding
 * one entry per PPDU (up to PEER_STATS_FOR_NO_OF_PPDUS).
 */
struct ath10k_10_2_peer_tx_stats {
	u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
	__le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
	u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
	__le32 tx_duration;
	u8 tx_ppdu_cnt;
	u8 peer_id;
} __packed;
/* Rx packet number, sized per cipher suite. */
union htt_rx_pn_t {
	/* WEP: 24-bit PN */
	u32 pn24;

	/* TKIP or CCMP: 48-bit PN */
	u64 pn48;

	/* WAPI: 128-bit PN */
	u64 pn128[2];
};
/* Host -> target HTT command: common header followed by the
 * message-type-specific body (selected by hdr.msg_type).
 */
struct htt_cmd {
	struct htt_cmd_hdr hdr;
	union {
		struct htt_ver_req ver_req;
		struct htt_mgmt_tx_desc mgmt_tx;
		struct htt_data_tx_desc data_tx;
		struct htt_rx_ring_setup_32 rx_setup_32;
		struct htt_rx_ring_setup_64 rx_setup_64;
		struct htt_stats_req stats_req;
		struct htt_oob_sync_req oob_sync_req;
		struct htt_aggr_conf aggr_conf;
		struct htt_aggr_conf_v2 aggr_conf_v2;
		struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
		struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
		struct htt_tx_fetch_resp tx_fetch_resp;
	};
} __packed;
/* Target -> host HTT message: common header followed by the
 * message-type-specific body (selected by hdr.msg_type).
 */
struct htt_resp {
	struct htt_resp_hdr hdr;
	union {
		struct htt_ver_resp ver_resp;
		struct htt_mgmt_tx_completion mgmt_tx_completion;
		struct htt_data_tx_completion data_tx_completion;
		struct htt_rx_indication rx_ind;
		struct htt_rx_indication_hl rx_ind_hl;
		struct htt_rx_fragment_indication rx_frag_ind;
		struct htt_rx_peer_map peer_map;
		struct htt_rx_peer_unmap peer_unmap;
		struct htt_rx_flush rx_flush;
		struct htt_rx_addba rx_addba;
		struct htt_rx_delba rx_delba;
		struct htt_security_indication security_indication;
		struct htt_rc_update rc_update;
		struct htt_rx_test rx_test;
		struct htt_pktlog_msg pktlog_msg;
		struct htt_rx_pn_ind rx_pn_ind;
		struct htt_rx_offload_ind rx_offload_ind;
		struct htt_rx_in_ord_ind rx_in_ord_ind;
		struct htt_tx_fetch_ind tx_fetch_ind;
		struct htt_tx_fetch_confirm tx_fetch_confirm;
		struct htt_tx_mode_switch_ind tx_mode_switch_ind;
		struct htt_channel_change chan_change;
		struct htt_peer_tx_stats peer_tx_stats;
	};
} __packed;
  1516. /*** host side structures follow ***/
/* Host-side tx completion record (queued on txdone_fifo). */
struct htt_tx_done {
	u16 msdu_id;
	u16 status;	/* one of htt_tx_compl_state */
	u8 ack_rssi;
};
/* Host-side tx completion states stored in htt_tx_done::status. */
enum htt_tx_compl_state {
	HTT_TX_COMPL_STATE_NONE,
	HTT_TX_COMPL_STATE_ACK,
	HTT_TX_COMPL_STATE_NOACK,
	HTT_TX_COMPL_STATE_DISCARD,
};
/* Host-side decoded form of an htt_rx_peer_map message. */
struct htt_peer_map_event {
	u8 vdev_id;
	u16 peer_id;
	u8 addr[ETH_ALEN];
};
/* Host-side decoded form of an htt_rx_peer_unmap message. */
struct htt_peer_unmap_event {
	u16 peer_id;
};
/* Per-msdu DMA tx buffer (32-bit target addressing): frag list plus the
 * HTC/HTT headers sent in front of the frame.
 */
struct ath10k_htt_txbuf_32 {
	struct htt_data_tx_desc_frag frags[2];
	struct ath10k_htc_hdr htc_hdr;
	struct htt_cmd_hdr cmd_hdr;
	struct htt_data_tx_desc cmd_tx;
} __packed __aligned(4);
/* As ath10k_htt_txbuf_32 but with the 64-bit tx descriptor variant. */
struct ath10k_htt_txbuf_64 {
	struct htt_data_tx_desc_frag frags[2];
	struct ath10k_htc_hdr htc_hdr;
	struct htt_cmd_hdr cmd_hdr;
	struct htt_data_tx_desc_64 cmd_tx;
} __packed __aligned(4);
/* Per-device HTT state: HTC endpoint, rx ring bookkeeping, tx pending
 * accounting, DMA pools for tx buffers / fragment descriptors, and the
 * hw-specific tx/rx op tables.
 */
struct ath10k_htt {
	struct ath10k *ar;
	enum ath10k_htc_ep_id eid;

	struct sk_buff_head rx_indication_head;

	u8 target_version_major;
	u8 target_version_minor;
	struct completion target_version_received;
	u8 max_num_amsdu;
	u8 max_num_ampdu;

	const enum htt_t2h_msg_type *t2h_msg_types;
	u32 t2h_msg_types_max;

	struct {
		/*
		 * Ring of network buffer objects - This ring is
		 * used exclusively by the host SW. This ring
		 * mirrors the dev_addrs_ring that is shared
		 * between the host SW and the MAC HW. The host SW
		 * uses this netbufs ring to locate the network
		 * buffer objects whose data buffers the HW has
		 * filled.
		 */
		struct sk_buff **netbufs_ring;

		/* This is used only with firmware supporting IN_ORD_IND.
		 *
		 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
		 * buffer ring from which buffer addresses are copied by the
		 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
		 * pointing to specific (re-ordered) buffers.
		 *
		 * FIXME: With kernel generic hashing functions there's a lot
		 * of hash collisions for sk_buffs.
		 */
		bool in_ord_rx;
		DECLARE_HASHTABLE(skb_table, 4);

		/*
		 * Ring of buffer addresses -
		 * This ring holds the "physical" device address of the
		 * rx buffers the host SW provides for the MAC HW to
		 * fill.
		 */
		union {
			__le64 *paddrs_ring_64;
			__le32 *paddrs_ring_32;
		};

		/*
		 * Base address of ring, as a "physical" device address
		 * rather than a CPU address.
		 */
		dma_addr_t base_paddr;

		/* how many elems in the ring (power of 2) */
		int size;

		/* size - 1 */
		unsigned int size_mask;

		/* how many rx buffers to keep in the ring */
		int fill_level;

		/* how many rx buffers (full+empty) are in the ring */
		int fill_cnt;

		/*
		 * alloc_idx - where HTT SW has deposited empty buffers
		 * This is allocated in consistent mem, so that the FW can
		 * read this variable, and program the HW's FW_IDX reg with
		 * the value of this shadow register.
		 */
		struct {
			__le32 *vaddr;
			dma_addr_t paddr;
		} alloc_idx;

		/* where HTT SW has processed bufs filled by rx MAC DMA */
		struct {
			unsigned int msdu_payld;
		} sw_rd_idx;

		/*
		 * refill_retry_timer - timer triggered when the ring is
		 * not refilled to the level expected
		 */
		struct timer_list refill_retry_timer;

		/* Protects access to all rx ring buffer state variables */
		spinlock_t lock;
	} rx_ring;

	unsigned int prefetch_len;

	/* Protects access to pending_tx, num_pending_tx */
	spinlock_t tx_lock;
	int max_num_pending_tx;
	int num_pending_tx;
	int num_pending_mgmt_tx;
	struct idr pending_tx;
	wait_queue_head_t empty_tx_wq;

	/* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
	DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);

	/* set if host-fw communication goes haywire
	 * used to avoid further failures
	 */
	bool rx_confused;
	atomic_t num_mpdus_ready;

	/* This is used to group tx/rx completions separately and process them
	 * in batches to reduce cache stalls
	 */
	struct sk_buff_head rx_msdus_q;
	struct sk_buff_head rx_in_ord_compl_q;
	struct sk_buff_head tx_fetch_ind_q;

	/* rx_status template */
	struct ieee80211_rx_status rx_status;

	/* DMA pool of per-msdu fragment descriptors (32/64-bit layout) */
	struct {
		dma_addr_t paddr;
		union {
			struct htt_msdu_ext_desc *vaddr_desc_32;
			struct htt_msdu_ext_desc_64 *vaddr_desc_64;
		};
		size_t size;
	} frag_desc;

	/* DMA pool of per-msdu tx buffers (32/64-bit layout) */
	struct {
		dma_addr_t paddr;
		union {
			struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
			struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
		};
		size_t size;
	} txbuf;

	/* push/pull tx queue state shared with firmware */
	struct {
		bool enabled;
		struct htt_q_state *vaddr;
		dma_addr_t paddr;
		u16 num_push_allowed;
		u16 num_peers;
		u16 num_tids;
		enum htt_tx_mode_switch_mode mode;
		enum htt_q_depth_type type;
	} tx_q_state;

	bool tx_mem_allocated;
	const struct ath10k_htt_tx_ops *tx_ops;
	const struct ath10k_htt_rx_ops *rx_ops;
	bool disable_tx_comp;
	bool bundle_tx;
	struct sk_buff_head tx_req_head;
	struct sk_buff_head tx_complete_head;
};
/* Hw-specific tx operations; optional entries may be NULL and are
 * guarded by the ath10k_htt_* wrappers below.
 */
struct ath10k_htt_tx_ops {
	int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
	int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
	int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
	void (*htt_free_frag_desc)(struct ath10k_htt *htt);
	int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		      struct sk_buff *msdu);
	int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
	void (*htt_free_txbuff)(struct ath10k_htt *htt);
	int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
				    u8 max_subfrms_ampdu,
				    u8 max_subfrms_amsdu);
	void (*htt_flush_tx)(struct ath10k_htt *htt);
};
  1698. static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
  1699. {
  1700. if (!htt->tx_ops->htt_send_rx_ring_cfg)
  1701. return -EOPNOTSUPP;
  1702. return htt->tx_ops->htt_send_rx_ring_cfg(htt);
  1703. }
  1704. static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
  1705. {
  1706. if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
  1707. return -EOPNOTSUPP;
  1708. return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
  1709. }
  1710. static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
  1711. {
  1712. if (!htt->tx_ops->htt_alloc_frag_desc)
  1713. return -EOPNOTSUPP;
  1714. return htt->tx_ops->htt_alloc_frag_desc(htt);
  1715. }
  1716. static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
  1717. {
  1718. if (htt->tx_ops->htt_free_frag_desc)
  1719. htt->tx_ops->htt_free_frag_desc(htt);
  1720. }
  1721. static inline int ath10k_htt_tx(struct ath10k_htt *htt,
  1722. enum ath10k_hw_txrx_mode txmode,
  1723. struct sk_buff *msdu)
  1724. {
  1725. return htt->tx_ops->htt_tx(htt, txmode, msdu);
  1726. }
  1727. static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
  1728. {
  1729. if (htt->tx_ops->htt_flush_tx)
  1730. htt->tx_ops->htt_flush_tx(htt);
  1731. }
  1732. static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
  1733. {
  1734. if (!htt->tx_ops->htt_alloc_txbuff)
  1735. return -EOPNOTSUPP;
  1736. return htt->tx_ops->htt_alloc_txbuff(htt);
  1737. }
  1738. static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
  1739. {
  1740. if (htt->tx_ops->htt_free_txbuff)
  1741. htt->tx_ops->htt_free_txbuff(htt);
  1742. }
  1743. static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
  1744. u8 max_subfrms_ampdu,
  1745. u8 max_subfrms_amsdu)
  1746. {
  1747. if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
  1748. return -EOPNOTSUPP;
  1749. return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
  1750. max_subfrms_ampdu,
  1751. max_subfrms_amsdu);
  1752. }
/* Hw-specific rx ring operations; optional entries may be NULL and are
 * guarded by the ath10k_htt_* wrappers below.
 */
struct ath10k_htt_rx_ops {
	size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
	void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
	void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
				    int idx);
	void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
	void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
	bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
					struct htt_rx_fragment_indication *rx,
					struct sk_buff *skb);
};
  1764. static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
  1765. {
  1766. if (!htt->rx_ops->htt_get_rx_ring_size)
  1767. return 0;
  1768. return htt->rx_ops->htt_get_rx_ring_size(htt);
  1769. }
  1770. static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
  1771. void *vaddr)
  1772. {
  1773. if (htt->rx_ops->htt_config_paddrs_ring)
  1774. htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
  1775. }
  1776. static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
  1777. dma_addr_t paddr,
  1778. int idx)
  1779. {
  1780. if (htt->rx_ops->htt_set_paddrs_ring)
  1781. htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
  1782. }
  1783. static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
  1784. {
  1785. if (!htt->rx_ops->htt_get_vaddr_ring)
  1786. return NULL;
  1787. return htt->rx_ops->htt_get_vaddr_ring(htt);
  1788. }
  1789. static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
  1790. {
  1791. if (htt->rx_ops->htt_reset_paddrs_ring)
  1792. htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
  1793. }
  1794. static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
  1795. struct htt_rx_fragment_indication *rx,
  1796. struct sk_buff *skb)
  1797. {
  1798. if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
  1799. return true;
  1800. return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
  1801. }
/* the driver strongly assumes that the rx header status be 64 bytes long,
 * so all possible rx_desc structures must respect this assumption.
 */
#define RX_HTT_HDR_STATUS_LEN 64

/* The rx descriptor structure layout is programmed via rx ring setup
 * so that FW knows how to transfer the rx descriptor to the host.
 * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly
 * when modifying the structure layout of the rx descriptor beyond what it expects
 * (even if it correctly programmed during the rx ring setup).
 * Therefore we must keep two different memory layouts, abstract the rx descriptor
 * representation and use ath10k_rx_desc_ops
 * for correctly accessing rx descriptor data.
 */

/* base struct used for abstracting the rx descritor representation */
struct htt_rx_desc {
	union {
		/* This field is filled on the host using the msdu buffer
		 * from htt_rx_indication
		 */
		struct fw_rx_desc_base fw_desc;
		u32 pad;
	} __packed;
} __packed;
/* rx descriptor for wcn3990 and possibly extensible for newer cards
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc_v2 {
	struct htt_rx_desc base;
	struct {
		struct rx_attention attention;
		struct rx_frag_info frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start msdu_start;
		struct rx_msdu_end msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end ppdu_end;
	} __packed;
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[];
};
/* QCA6174, QCA988x, QCA99x0 dedicated rx descriptor to make sure their firmware
 * works correctly. We keep a single rx descriptor for all these three
 * families of cards because from tests it seems to be the most stable solution,
 * e.g. having a rx descriptor only for QCA6174 seldom caused firmware crashes
 * during some tests.
 * Buffers like this are placed on the rx ring.
 */
struct htt_rx_desc_v1 {
	struct htt_rx_desc base;
	struct {
		struct rx_attention attention;
		struct rx_frag_info_v1 frag_info;
		struct rx_mpdu_start mpdu_start;
		struct rx_msdu_start_v1 msdu_start;
		struct rx_msdu_end_v1 msdu_end;
		struct rx_mpdu_end mpdu_end;
		struct rx_ppdu_start ppdu_start;
		struct rx_ppdu_end_v1 ppdu_end;
	} __packed;
	u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
	u8 msdu_payload[];
};
/* rx_desc abstraction */
struct ath10k_htt_rx_desc_ops {
	/* These fields are mandatory, they must be specified in any instance */

	/* sizeof() of the rx_desc structure used by this hw */
	size_t rx_desc_size;

	/* offset of msdu_payload inside the rx_desc structure used by this hw */
	size_t rx_desc_msdu_payload_offset;

	/* These fields are options.
	 * When a field is not provided the default implementation gets used
	 * (see the ath10k_rx_desc_* operations below for more info about the defaults)
	 */
	bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
	int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);

	/* Safely cast from a void* buffer containing an rx descriptor
	 * to the proper rx_desc structure
	 */
	struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);

	void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
	struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
	struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
	struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
	struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
	struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
	struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
	struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
	struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
	u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
	u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
};

/* Per-family rx_desc op tables, defined in the corresponding .c files. */
extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
  1897. static inline int
  1898. ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1899. {
  1900. if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
  1901. return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
  1902. return 0;
  1903. }
  1904. static inline bool
  1905. ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1906. {
  1907. if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
  1908. return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
  1909. return false;
  1910. }
  1911. /* The default implementation of all these getters is using the old rx_desc,
  1912. * so that it is easier to define the ath10k_htt_rx_desc_ops instances.
  1913. * But probably, if new wireless cards must be supported, it would be better
  1914. * to switch the default implementation to the new rx_desc, since this would
  1915. * make the extension easier .
  1916. */
  1917. static inline struct htt_rx_desc *
  1918. ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
  1919. {
  1920. if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
  1921. return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
  1922. return &((struct htt_rx_desc_v1 *)buff)->base;
  1923. }
/* Fill the rx ring setup offsets (in 32-bit words) either from the
 * hw-specific op or, by default, from the v1 descriptor layout.
 */
static inline void
ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
			       struct htt_rx_ring_rx_desc_offsets *off)
{
	if (hw->rx_desc_ops->rx_desc_get_offsets) {
		hw->rx_desc_ops->rx_desc_get_offsets(off);
	} else {
		/* byte offset converted to a 32-bit word offset */
#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
		off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
		off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
		off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
		off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
		off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
		off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
		off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
		off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
		off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
		off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
	}
}
  1945. static inline struct rx_attention *
  1946. ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1947. {
  1948. struct htt_rx_desc_v1 *rx_desc;
  1949. if (hw->rx_desc_ops->rx_desc_get_attention)
  1950. return hw->rx_desc_ops->rx_desc_get_attention(rxd);
  1951. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  1952. return &rx_desc->attention;
  1953. }
  1954. static inline struct rx_frag_info_common *
  1955. ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1956. {
  1957. struct htt_rx_desc_v1 *rx_desc;
  1958. if (hw->rx_desc_ops->rx_desc_get_frag_info)
  1959. return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
  1960. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  1961. return &rx_desc->frag_info.common;
  1962. }
  1963. static inline struct rx_mpdu_start *
  1964. ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1965. {
  1966. struct htt_rx_desc_v1 *rx_desc;
  1967. if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
  1968. return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
  1969. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  1970. return &rx_desc->mpdu_start;
  1971. }
  1972. static inline struct rx_mpdu_end *
  1973. ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1974. {
  1975. struct htt_rx_desc_v1 *rx_desc;
  1976. if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
  1977. return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
  1978. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  1979. return &rx_desc->mpdu_end;
  1980. }
  1981. static inline struct rx_msdu_start_common *
  1982. ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1983. {
  1984. struct htt_rx_desc_v1 *rx_desc;
  1985. if (hw->rx_desc_ops->rx_desc_get_msdu_start)
  1986. return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
  1987. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  1988. return &rx_desc->msdu_start.common;
  1989. }
  1990. static inline struct rx_msdu_end_common *
  1991. ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  1992. {
  1993. struct htt_rx_desc_v1 *rx_desc;
  1994. if (hw->rx_desc_ops->rx_desc_get_msdu_end)
  1995. return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
  1996. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  1997. return &rx_desc->msdu_end.common;
  1998. }
  1999. static inline struct rx_ppdu_start *
  2000. ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  2001. {
  2002. struct htt_rx_desc_v1 *rx_desc;
  2003. if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
  2004. return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
  2005. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  2006. return &rx_desc->ppdu_start;
  2007. }
  2008. static inline struct rx_ppdu_end_common *
  2009. ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  2010. {
  2011. struct htt_rx_desc_v1 *rx_desc;
  2012. if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
  2013. return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
  2014. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  2015. return &rx_desc->ppdu_end.common;
  2016. }
  2017. static inline u8 *
  2018. ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  2019. {
  2020. struct htt_rx_desc_v1 *rx_desc;
  2021. if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
  2022. return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
  2023. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  2024. return rx_desc->rx_hdr_status;
  2025. }
  2026. static inline u8 *
  2027. ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
  2028. {
  2029. struct htt_rx_desc_v1 *rx_desc;
  2030. if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
  2031. return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
  2032. rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
  2033. return rx_desc->msdu_payload;
  2034. }
/* Bitfields of htt_rx_desc_base_hl::info (high-latency rx descriptor) */
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000
#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17

/* Base rx descriptor used by high-latency (HL) firmware. */
struct htt_rx_desc_base_hl {
	__le32 info; /* HTT_RX_DESC_HL_INFO_ */
};
/* Channel info optionally appended to HL rx descriptors (presence is
 * signalled by HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT).
 */
struct htt_rx_chan_info {
	__le16 primary_chan_center_freq_mhz;
	__le16 contig_chan1_center_freq_mhz;
	__le16 contig_chan2_center_freq_mhz;
	u8 phy_mode;
	u8 reserved;
} __packed;
#define HTT_RX_DESC_ALIGN 8

#define HTT_MAC_ADDR_LEN 6

/*
 * FIX THIS
 * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
 * rounded up to a cache line size.
 */
#define HTT_RX_BUF_SIZE 2048

/* The HTT_RX_MSDU_SIZE can't be statically computed anymore,
 * because it depends on the underlying device rx_desc representation
 */
  2066. static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
  2067. {
  2068. return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
  2069. }
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
 * aggregated traffic more nicely.
 */
#define ATH10K_HTT_MAX_NUM_REFILL 100

/*
 * DMA_MAP expects the buffer to be an integral number of cache lines.
 * Rather than checking the actual cache line size, this code makes a
 * conservative estimate of what the cache line size could be.
 */
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)

/* These values are default in most firmware revisions and apparently are a
 * sweet spot performance wise.
 */
#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
/* HTT public API, implemented in htt.c / htt_tx.c / htt_rx.c */

/* setup / teardown */
int ath10k_htt_connect(struct ath10k_htt *htt);
int ath10k_htt_init(struct ath10k *ar);
int ath10k_htt_setup(struct ath10k_htt *htt);

int ath10k_htt_tx_start(struct ath10k_htt *htt);
void ath10k_htt_tx_stop(struct ath10k_htt *htt);
void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt);

int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);

/* HTC message handling */
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records);
void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);

/* tx queue and pending-frame accounting */
void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq);
void ath10k_htt_tx_txq_sync(struct ath10k *ar);
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp);

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);

/* completion processing */
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
  2128. #endif