/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#include <wmi_unified_api.h>
#include <wdi_event_api.h>

#ifdef IPA_OFFLOAD
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1

struct hif_opaque_softc;

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};

#define CDP_DELBA_INTERVAL_MS 3000

/**
 * enum cdp_delba_rcode - CDP reason code for sending DELBA
 * @CDP_DELBA_REASON_NONE: None
 * @CDP_DELBA_2K_JUMP: Sending DELBA from 2k_jump_handle
 */
enum cdp_delba_rcode {
	CDP_DELBA_REASON_NONE = 0,
	CDP_DELBA_2K_JUMP,
};

/**
 * enum vdev_peer_protocol_enter_exit - whether ingress or egress
 * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress
 * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress
 *
 * whether ingress or egress
 */
enum vdev_peer_protocol_enter_exit {
	CDP_VDEV_PEER_PROTOCOL_IS_INGRESS,
	CDP_VDEV_PEER_PROTOCOL_IS_EGRESS
};

/**
 * enum vdev_peer_protocol_tx_rx - whether tx or rx
 * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx
 * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx
 *
 * whether tx or rx
 */
enum vdev_peer_protocol_tx_rx {
	CDP_VDEV_PEER_PROTOCOL_IS_TX,
	CDP_VDEV_PEER_PROTOCOL_IS_RX
};
/**
 * enum vdev_ll_conn_actions - Actions to inform vdev about
 * low latency connection.
 * @CDP_VDEV_LL_CONN_ADD: Add Low latency connection
 * @CDP_VDEV_LL_CONN_DEL: Delete Low latency connection
 */
enum vdev_ll_conn_actions {
	CDP_VDEV_LL_CONN_ADD,
	CDP_VDEV_LL_CONN_DEL
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc,
				       uint8_t pdev_id);
	QDF_STATUS (*txrx_vdev_attach)
		(struct cdp_soc_t *soc, uint8_t pdev_id, uint8_t *mac,
		 uint8_t vdev_id, enum wlan_op_mode op_mode,
		 enum wlan_op_subtype subtype);
	QDF_STATUS
	(*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
			    ol_txrx_vdev_delete_cb callback,
			    void *cb_context);
	QDF_STATUS (*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
		 qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void
	(*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
				int force);
	QDF_STATUS
	(*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
			    int force);
	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @soc: soc dp handle
	 * @pdev_id: id of Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: QDF_STATUS
	 */
	QDF_STATUS
	(*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
			    int force);
	QDF_STATUS
	(*txrx_peer_create)
		(ol_txrx_soc_handle soc, uint8_t vdev_id,
		 uint8_t *peer_mac_addr);
	QDF_STATUS
	(*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   uint8_t *peer_mac);
	QDF_STATUS
	(*txrx_cp_peer_del_response)
		(ol_txrx_soc_handle soc, uint8_t vdev_id,
		 uint8_t *peer_mac_addr);
	QDF_STATUS
	(*txrx_peer_teardown)
		(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
		 uint8_t *mac_addr, uint32_t flags);
	bool (*txrx_peer_get_ast_info_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 struct cdp_ast_entry_info *ast_entry_info);
	bool (*txrx_peer_get_ast_info_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info);
	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 txrx_ast_free_cb callback,
		 void *cookie);
	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 txrx_ast_free_cb callback,
		 void *cookie);
	QDF_STATUS
	(*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
			    uint8_t *peer_mac, uint32_t bitmap);
	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
					    uint8_t vdev_id,
					    uint8_t smart_monitor);
	void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac,
				      QDF_STATUS(*delete_cb)(
						uint8_t vdev_id,
						uint32_t peerid_cnt,
						uint16_t *peerid_list),
				      uint32_t bitmap);
	void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
					    uint8_t pdev_id,
					    ol_txrx_peer_unmap_sync_cb
					    peer_unmap_sync);
	QDF_STATUS
	(*txrx_get_peer_mac_from_peer_id)
		(struct cdp_soc_t *cdp_soc,
		 uint32_t peer_id, uint8_t *peer_mac);
	void
	(*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
	void
	(*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
			     struct cdp_dev_stats *stats, uint8_t type);
	QDF_STATUS
	(*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
			     u_int8_t *mem_status,
			     u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
							uint8_t pdev_id);
	QDF_STATUS
	(*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
			      int force);
	QDF_STATUS
	(*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
			    uint32_t chan_mhz);
	QDF_STATUS
	(*txrx_set_privacy_filters)
		(struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
		 uint32_t num);
	uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);
	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	QDF_STATUS
	(*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
			      ol_osif_vdev_handle osif_vdev,
			      struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
			      qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
				  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
				  uint8_t use_6mbps, uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	QDF_STATUS
	(*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
			       uint8_t type,
			       ol_txrx_mgmt_tx_cb download_cb,
			       ol_txrx_mgmt_tx_cb ota_ack_cb,
			       void *ctxt);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
				    ol_txrx_data_tx_cb callback, void *ctxt);
	qdf_nbuf_t (*tx_send_exc)
		(ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list,
		 struct cdp_tx_exception_metadata *tx_exc_metadata);
	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
			     int max_subfrms_ampdu,
			     int max_subfrms_amsdu);
	A_STATUS
	(*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
			     struct ol_txrx_stats_req *req,
			     bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
			  int debug_specs);
	QDF_STATUS
	(*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
			     uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
					    uint8_t vdev_id);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
							uint8_t vdev_id);
	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @soc: datapath soc handle
	 * @pdev: pdev id
	 *
	 * Return: vdev_id
	 */
	uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
					       uint8_t pdev_id);
	void (*txrx_soc_detach)(struct cdp_soc_t *soc);
	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(struct cdp_soc_t *soc);
	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 * @ctrl_psoc: Opaque Cp handle
	 * @hif_handle: Opaque hif handle
	 * @htc_handle: Opaque htc handle
	 * @qdf_osdev: QDF device handle
	 * @ol_ops: Offload (control path) callback table
	 * @device_id: Device id
	 *
	 * Return: Opaque DP soc handle on success, NULL on failure
	 */
	void *(*txrx_soc_init)(struct cdp_soc_t *soc,
			       struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			       struct hif_opaque_softc *hif_handle,
			       HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			       struct ol_if_ops *ol_ops, uint16_t device_id);
	QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
				     HTC_HANDLE htc_handle,
				     qdf_device_t qdf_osdev,
				     uint8_t pdev_id);
	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);
	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
	int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
					uint8_t *peer_mac,
					uint16_t vdev_id, uint8_t tid,
					int status);
	int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
				    uint8_t *peer_mac,
				    uint16_t vdev_id,
				    uint8_t dialogtoken,
				    uint16_t tid, uint16_t batimeout,
				    uint16_t buffersize,
				    uint16_t startseqnum);
	QDF_STATUS
	(*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
			       uint8_t *peer_mac,
			       uint16_t vdev_id, uint8_t tid,
			       uint8_t *dialogtoken, uint16_t *statuscode,
			       uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			     uint16_t vdev_id, int tid, uint16_t reasoncode);
	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @cdp_soc: soc handle
	 * @peer_mac: Peer mac address
	 * @vdev_id: vdev id
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc,
				   uint8_t *peer_mac,
				   uint16_t vdev_id,
				   uint8_t tid, int status);
	QDF_STATUS
	(*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
			      uint16_t vdev_id, uint8_t tid,
			      uint16_t statuscode);
	QDF_STATUS
	(*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
				 uint8_t vdev_id, uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void (*flush_cache_rx_queue)(void);
	QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
					    uint8_t pdev_id,
					    uint8_t map_id,
					    uint8_t tos, uint8_t tid);
	QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
					 uint8_t vdev_id,
					 struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);
	QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
	void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
	QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
				   uint8_t vdev_id, uint8_t *peermac,
				   enum cdp_sec_type sec_type,
				   uint32_t *rx_pn);
	QDF_STATUS (*set_key_sec_type)(struct cdp_soc_t *soc_handle,
				       uint8_t vdev_id, uint8_t *peermac,
				       enum cdp_sec_type sec_type,
				       bool is_unicast);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
					       struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
	void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
				   void *dp_hdl);
	void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
					     uint8_t vdev_id);
	QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
						  uint8_t vdev_id,
						  uint16_t size);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
				       void *dp_txrx_handle);
	QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
				       uint32_t lmac_id);
	QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc,
					 uint8_t pdev_id, uint32_t lmac_id);
	QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
					   uint8_t pdev_id, bool is_pdev_down);
	QDF_STATUS (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
		 uint8_t *peer_macaddr, uint8_t vdev_id);
	QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
						uint8_t vdev_id);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t *value);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
					   uint32_t num_peers,
					   uint32_t max_ast_index,
					   bool peer_map_unmap_v2);
	QDF_STATUS (*set_soc_param)(ol_txrx_soc_handle soc,
				    enum cdp_soc_param_t param,
				    uint32_t value);
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return the function and
	 * osif vdev used to deliver RX packets to the stack.
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 * @stack_fn: filled with the function pointer that delivers RX
	 * packets to the stack
	 * @osif_vdev: filled with the osif vdev to deliver RX packets to
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
		(ol_txrx_soc_handle soc,
		 uint8_t vdev_id,
		 ol_txrx_rx_fp *stack_fn,
		 ol_osif_vdev_handle *osif_vdev);
	void (*set_rate_stats_ctx)(struct cdp_soc_t *soc,
				   void *ctx);
	int (*txrx_classify_update)
		(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
		 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
				    enum cdp_capabilities dp_caps);
	void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
	QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
						 uint8_t pdev_id,
						 void *buf);
	void* (*txrx_peer_get_rdkstats_ctx)(struct cdp_soc_t *soc,
					    uint8_t vdev_id,
					    uint8_t *mac_addr);
	QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
						    uint8_t pdev_id);
	QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
					   uint8_t pdev_id,
					   uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
					   uint8_t vdev_id,
					   uint8_t pcp, uint8_t tid);
#ifdef QCA_MULTIPASS_SUPPORT
	QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
					uint16_t vlan_id, uint16_t group_key);
#endif
	uint16_t (*get_peer_mac_list)
		(ol_txrx_soc_handle soc, uint8_t vdev_id,
		 u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt,
		 bool limit);
#ifdef QCA_SUPPORT_WDS_EXTENDED
	uint16_t (*get_wds_ext_peer_id)(ol_txrx_soc_handle soc,
					uint8_t vdev_id,
					uint8_t *mac);
	QDF_STATUS (*set_wds_ext_peer_rx)(ol_txrx_soc_handle soc,
					  uint8_t vdev_id,
					  uint8_t *mac,
					  ol_txrx_rx_fp rx,
					  ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
	void (*txrx_drain)(ol_txrx_soc_handle soc);
	int (*get_free_desc_poolsize)(struct cdp_soc_t *soc);
};
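
/*
 * Illustrative sketch only (not part of the upstream header): callers do
 * not invoke cdp_cmn_ops members directly; the cdp_* convergence wrappers
 * dispatch through the soc's registered ops table after NULL-checking it.
 * The helper name below is hypothetical and simply shows that pattern with
 * the table passed in explicitly.
 */
static inline QDF_STATUS
example_cdp_peer_create(struct cdp_cmn_ops *cmn_ops, ol_txrx_soc_handle soc,
			uint8_t vdev_id, uint8_t *peer_mac_addr)
{
	/* Refuse to dispatch through an unpopulated ops table */
	if (!cmn_ops || !cmn_ops->txrx_peer_create)
		return QDF_STATUS_E_INVAL;

	return cmn_ops->txrx_peer_create(soc, vdev_id, peer_mac_addr);
}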
struct cdp_ctrl_ops {
	int
	(*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc);
	int
	(*txrx_update_filter_neighbour_peers)(
		struct cdp_soc_t *soc, uint8_t vdev_id,
		uint32_t cmd, uint8_t *macaddr);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param soc_hdl - pointer to the soc object
	 * @param vdev_id - id of the virtual device object
	 * @param peer_mac - mac address of the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return QDF_STATUS
	 */
	QDF_STATUS
	(*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl,
			       uint8_t vdev_id,
			       uint8_t *peer_mac,
			       u_int32_t authorize);
	void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id);
	int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl);
	QDF_STATUS
	(*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id,
			       enum cdp_vdev_param_type param,
			       cdp_config_param_type val);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring number on which we will receive
	 * pkts for this radio.
	 *
	 * @param txrx_soc - soc handle
	 * @param pdev_id - id of physical device
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 * @return QDF_STATUS
	 */
	QDF_STATUS (*txrx_set_pdev_reo_dest)(
		struct cdp_soc_t *txrx_soc,
		uint8_t pdev_id,
		enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring number on which we will receive
	 * pkts for this radio.
	 *
	 * @param txrx_soc - soc handle
	 * @param pdev_id - id of physical device
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
		struct cdp_soc_t *txrx_soc,
		uint8_t pdev_id);
	int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id,
				  wdi_event_subscribe *event_cb_sub,
				  uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id,
				    wdi_event_subscribe *event_cb_sub,
				    uint32_t event);
	int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id,
				 uint8_t *peer_mac, uint8_t sec_idx);
	QDF_STATUS
	(*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc,
				       uint8_t vdev_id,
				       uint8_t subtype, uint8_t tx_power);
	/**
	 * txrx_set_pdev_param() - callback to set pdev parameter
	 * @soc: opaque soc handle
	 * @pdev_id: id of data path pdev handle
	 * @type: pdev parameter type to set
	 * @val: parameter value
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc,
					  uint8_t pdev_id,
					  enum cdp_pdev_param_type type,
					  cdp_config_param_type val);
	QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc,
					  uint8_t pdev_id,
					  enum cdp_pdev_param_type type,
					  cdp_config_param_type *val);
	QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc,
					  uint8_t vdev_id, uint8_t *peer_mac,
					  enum cdp_peer_param_type param,
					  cdp_config_param_type val);
	QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc,
					  uint8_t vdev_id, uint8_t *peer_mac,
					  enum cdp_peer_param_type param,
					  cdp_config_param_type *val);
	void * (*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void (*txrx_peer_flush_frags)(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac);
#ifdef VDEV_PEER_PROTOCOL_COUNT
	void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc,
				       int8_t vdev_id,
				       qdf_nbuf_t nbuf,
				       bool is_egress,
				       bool is_rx);
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc,
						    uint8_t vdev_id,
						    enum cdp_nac_param_cmd cmd,
						    char *bssid,
						    char *client_macaddr,
						    uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc,
						   uint8_t vdev_id,
						   char *macaddr,
						   uint8_t *rssi);
#endif
#ifdef WLAN_SUPPORT_SCS
	QDF_STATUS
	(*txrx_enable_scs_params)(
		struct cdp_soc_t *soc, struct qdf_mac_addr *macaddr,
		uint8_t vdev_id,
		bool is_active);
	QDF_STATUS
	(*txrx_record_scs_params)(
		struct cdp_soc_t *soc, struct qdf_mac_addr *macaddr,
		uint8_t vdev_id,
		struct cdp_scs_params *scs_params,
		uint8_t entry_ctr,
		uint8_t scs_sessions);
#endif
#ifdef WLAN_SUPPORT_MSCS
	QDF_STATUS
	(*txrx_record_mscs_params)(
		struct cdp_soc_t *soc, uint8_t *macaddr,
		uint8_t vdev_id,
		struct cdp_mscs_params *mscs_params,
		bool active);
#endif
	QDF_STATUS
	(*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac,
		   bool is_unicast, uint32_t *key);
	QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc,
					  uint8_t vdev_id,
					  enum cdp_vdev_param_type param,
					  cdp_config_param_type *val);
	int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id,
					uint8_t *macaddr, uint8_t enb_dsb);
	QDF_STATUS
	(*calculate_delay_stats)(struct cdp_soc_t *cdp_soc,
				 uint8_t vdev_id, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
			struct cdp_soc_t *soc, uint8_t pdev_id,
			uint32_t protocol_mask, uint16_t protocol_type,
			uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	void (*txrx_dump_pdev_rx_protocol_tag_stats)(
			struct cdp_soc_t *soc, uint8_t pdev_id,
			uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	QDF_STATUS (*txrx_set_rx_flow_tag)(
			struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			struct cdp_rx_flow_info *flow_info);
	QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
			struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			struct cdp_rx_flow_info *flow_info);
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc,
				      uint8_t vdev_id, uint8_t *peer_mac,
				      uint16_t vlan_id);
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
			ol_txrx_soc_handle soc, uint8_t pdev_id,
			bool is_rx_pkt_cap_enable, uint8_t is_tx_pkt_cap_enable,
			uint8_t *peer_mac);
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
	QDF_STATUS
	(*txrx_set_psoc_param)(struct cdp_soc_t *soc,
			       enum cdp_psoc_param_type param,
			       cdp_config_param_type val);
	QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc,
					  enum cdp_psoc_param_type type,
					  cdp_config_param_type *val);
#ifdef VDEV_PEER_PROTOCOL_COUNT
	/*
	 * Enable per-peer protocol counters
	 */
	void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc,
						int8_t vdev_id, bool enable);
	void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
						 int8_t vdev_id, int mask);
	int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc,
						   int8_t vdev_id);
	int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
						int8_t vdev_id);
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	void (*txrx_set_delta_tsf)(struct cdp_soc_t *soc, uint8_t vdev_id,
				   uint32_t delta_tsf);
	QDF_STATUS (*txrx_set_tsf_ul_delay_report)(struct cdp_soc_t *soc,
						   uint8_t vdev_id,
						   bool enable);
	QDF_STATUS (*txrx_get_uplink_delay)(struct cdp_soc_t *soc,
					    uint8_t vdev_id,
					    uint32_t *val);
#endif
};
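
/*
 * Illustrative sketch only: marking a peer authorized at association time
 * through the control ops table. The wrapper name is hypothetical; real
 * callers go through the cdp_peer_authorize() convergence wrapper.
 */
static inline QDF_STATUS
example_peer_authorize(struct cdp_ctrl_ops *ctrl_ops, struct cdp_soc_t *soc,
		       uint8_t vdev_id, uint8_t *peer_mac)
{
	if (!ctrl_ops || !ctrl_ops->txrx_peer_authorize)
		return QDF_STATUS_E_INVAL;

	/* authorize = 1 authorizes the peer, 0 unauthorizes it */
	return ctrl_ops->txrx_peer_authorize(soc, vdev_id, peer_mac, 1);
}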
struct cdp_me_ops {
	void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc,
				       uint8_t pdev_id);
	void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id);
	uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id,
					qdf_nbuf_t wbuf, u_int8_t newmac[][6],
					uint8_t newmaccnt, uint8_t tid,
					bool is_igmp);
};
struct cdp_mon_ops {
	QDF_STATUS (*txrx_reset_monitor_mode)
		(ol_txrx_soc_handle soc, uint8_t pdev_id,
		 u_int8_t smart_monitor);
	QDF_STATUS (*txrx_deliver_tx_mgmt)
		(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf);
	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		 struct cdp_monitor_filter *filter_val);
	/* Configure full monitor mode */
	QDF_STATUS
	(*config_full_mon_mode)(struct cdp_soc_t *soc, uint8_t val);
};
struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
				   struct ol_txrx_stats_req *req);
	QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
					  uint8_t vdev_id);
	QDF_STATUS
	(*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
				  struct cdp_stats_extd *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
						 uint8_t pdev_id);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
						  uint8_t pdev_id);
	QDF_STATUS
	(*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
					struct ol_txrx_stats_req *req);
	int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
					 uint8_t pdev_id,
					 uint8_t *addr, void *stats,
					 uint32_t last_tx_rate_mcs,
					 uint32_t stats_id);
	QDF_STATUS
	(*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
			     uint8_t *addr,
			     uint32_t cap, uint32_t copy_stats);
	QDF_STATUS
	(*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
			 void *data,
			 uint32_t data_len);
	QDF_STATUS
	(*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
				  uint8_t pdev_id, void *data,
				  uint16_t stats_id);
	QDF_STATUS
	(*txrx_get_peer_stats_param)(struct cdp_soc_t *soc,
				     uint8_t vdev_id,
				     uint8_t *peer_mac,
				     enum cdp_peer_stats_type type,
				     cdp_peer_stats_param_t *buf);
	QDF_STATUS
	(*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
			       uint8_t *peer_mac,
			       struct cdp_peer_stats *peer_stats);
	QDF_STATUS
	(*txrx_get_soc_stats)(struct cdp_soc_t *soc,
			      struct cdp_soc_stats *soc_stats);
	QDF_STATUS
	(*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
				     uint8_t vdev_id,
				     uint8_t *peer_mac);
	QDF_STATUS
	(*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
				 uint8_t vdev_id, uint8_t *peer_mac);
	int
	(*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
			       void *buf, bool is_aggregate);
	int
	(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
					    void *data, uint32_t len,
					    uint32_t stats_id);
	int
	(*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
				    uint8_t vdev_id,
				    wmi_host_vdev_extd_stats *buffer);
	QDF_STATUS
	(*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
				  uint8_t vdev_id, void *buf,
				  uint16_t stats_id);
	int
	(*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
				void *buf);
	QDF_STATUS
	(*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
			       struct cdp_pdev_stats *buf);
	int
	(*txrx_get_ratekbps)(int preamb, int mcs,
			     int htflag, int gintval);
	QDF_STATUS
	(*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
				  uint8_t *peer_mac, void *stats,
				  uint32_t last_tx_rate_mcs,
				  uint32_t stats_id);
	QDF_STATUS
	(*txrx_get_spcl_vap_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
				   struct cdp_spcl_vap_stats *stats);
};
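
/*
 * Illustrative sketch only: fetching the aggregate per-peer statistics into
 * a caller-provided buffer via the host stats ops table. The helper name is
 * hypothetical.
 */
static inline QDF_STATUS
example_get_peer_stats(struct cdp_host_stats_ops *stats_ops,
		       struct cdp_soc_t *soc, uint8_t vdev_id,
		       uint8_t *peer_mac, struct cdp_peer_stats *buf)
{
	if (!stats_ops || !stats_ops->txrx_get_peer_stats || !buf)
		return QDF_STATUS_E_INVAL;

	return stats_ops->txrx_get_peer_stats(soc, vdev_id, peer_mac, buf);
}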
struct cdp_wds_ops {
	QDF_STATUS
	(*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
				  u_int32_t val);
	QDF_STATUS
	(*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
					  uint8_t vdev_id, uint8_t *peer_mac,
					  int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
			    uint32_t val);
};
struct cdp_raw_ops {
	QDF_STATUS
	(*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
			     qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
};
#ifdef PEER_FLOW_CONTROL
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
					     uint8_t pdev_id,
					     enum _dp_param_t,
					     uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		 tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
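
/*
 * Illustrative sketch only: filling the rx_offld (LRO/GRO) configuration
 * before handing it to the control path. The flag/mask values and the
 * helper name are hypothetical; real seeds come from the platform RSS key.
 */
static inline void
example_fill_rx_offld_hash(struct cdp_lro_hash_config *cfg,
			   const uint32_t *ipv4_seed,
			   const uint32_t *ipv6_seed)
{
	int i;

	cfg->lro_enable = 1;
	/* Only packets whose masked TCP flags equal tcp_flag are eligible */
	cfg->tcp_flag = 0;
	cfg->tcp_flag_mask = 0x1ff;
	for (i = 0; i < LRO_IPV4_SEED_ARR_SZ; i++)
		cfg->toeplitz_hash_ipv4[i] = ipv4_seed[i];
	for (i = 0; i < LRO_IPV6_SEED_ARR_SZ; i++)
		cfg->toeplitz_hash_ipv6[i] = ipv6_seed[i];
}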
struct ol_if_ops {
	void
	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
				    uint8_t pdev_id, uint8_t *peer_macaddr,
				    uint8_t vdev_id,
				    bool hash_based, uint8_t ring_num);
	QDF_STATUS
	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
				       uint8_t pdev_id,
				       uint8_t vdev_id, uint8_t *peer_mac,
				       qdf_dma_addr_t hw_qdesc, int tid,
				       uint16_t queue_num,
				       uint8_t ba_window_size_valid,
				       uint16_t ba_window_size);
	QDF_STATUS
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
					uint8_t pdev_id,
					uint8_t vdev_id, uint8_t *peer_macaddr,
					uint32_t tid_mask);
	int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
				 uint8_t pdev_id,
				 uint8_t *peer_mac,
				 uint8_t *vdev_mac, enum wlan_op_mode opmode);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
				  uint8_t vdev_id,
				  uint8_t *peer_macaddr,
				  uint16_t peer_id,
				  const uint8_t *dest_macaddr,
				  uint8_t *next_node_mac,
				  uint32_t flags,
				  uint8_t type);
	int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
				     uint8_t vdev_id,
				     uint8_t *dest_macaddr,
				     uint8_t *peer_macaddr,
				     uint32_t flags);
	void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
				   uint8_t vdev_id,
				   uint8_t *wds_macaddr,
				   uint8_t type,
				   uint8_t delete_in_fw);
	QDF_STATUS
	(*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
			   struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
				uint8_t type);
#ifdef FEATURE_NAC_RSSI
	uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
				   uint8_t pdev_id, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
			      uint16_t peer_id, uint16_t hw_peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      enum cdp_txrx_ast_entry_type peer_type,
			      uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
				uint16_t peer_id,
				uint8_t vdev_id);
	int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
				enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
			     uint8_t pdev_id,
			     struct cdp_rx_mic_err_info *info);
	bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
				   uint8_t vdev_id, uint8_t *peer_mac_addr,
				   qdf_nbuf_t nbuf,
				   uint16_t hdr_space);
	uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
				   uint8_t pdev_id, uint16_t freq);
	uint8_t (*freq_to_band)(struct cdp_ctrl_objmgr_psoc *psoc,
				uint8_t pdev_id, uint16_t freq);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
				      uint8_t pdev_id,
				      u_int8_t vdev_id,
				      enum cdp_nac_param_cmd cmd, char *bssid,
				      char *client_macaddr, uint8_t chan_num);
	int
	(*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
					   uint8_t pdev_id, u_int8_t vdev_id,
					   enum cdp_nac_param_cmd cmd,
					   char *bssid, char *client_mac);
#endif
	int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
				uint16_t pdev_id, uint8_t *peer_macaddr);
	/**
	 * send_delba() - Send delba to peer
	 * @psoc: Objmgr soc handle
	 * @vdev_id: dp vdev id
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 * @reason_code: Reason code
	 * @cdp_rcode: CDP reason code for sending DELBA
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
			  uint8_t *peer_macaddr, uint8_t tid,
			  uint8_t reason_code, uint8_t cdp_rcode);
	int
	(*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
					    uint8_t vdev_id,
					    uint8_t *dest_macaddr,
					    uint8_t *peer_macaddr,
					    uint32_t flags);
	int
	(*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc,
					     uint8_t *pdev_id,
					     uint8_t *lmac_id,
					     uint8_t *target_pdev_id);
	bool (*is_roam_inprogress)(uint32_t vdev_id);
	enum QDF_GLOBAL_MODE (*get_con_mode)(void);
#ifdef QCA_PEER_MULTIQ_SUPPORT
	int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
				   uint16_t peer_id, uint8_t vdev_id,
				   uint8_t *peer_mac_addr);
#endif
#ifdef DP_MEM_PRE_ALLOC
	void *(*dp_prealloc_get_context)(uint32_t ctxt_type);
	QDF_STATUS(*dp_prealloc_put_context)(uint32_t ctxt_type, void *vaddr);
	void *(*dp_prealloc_get_consistent)(uint32_t *size,
					    void **base_vaddr_unaligned,
					    qdf_dma_addr_t *paddr_unaligned,
					    qdf_dma_addr_t *paddr_aligned,
					    uint32_t align,
					    uint32_t ring_type);
	void (*dp_prealloc_put_consistent)(qdf_size_t size,
					   void *vaddr_unaligned,
					   qdf_dma_addr_t paddr);
	void (*dp_get_multi_pages)(uint32_t desc_type,
				   size_t element_size,
				   uint16_t element_num,
				   struct qdf_mem_multi_page_t *pages,
				   bool cacheable);
	void (*dp_put_multi_pages)(uint32_t desc_type,
				   struct qdf_mem_multi_page_t *pages);
#endif
	int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
	char *(*get_device_name)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
				 uint8_t pdev_id);
	QDF_STATUS(*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
				   uint8_t vdev_id);
	int (*dp_rx_get_pending)(ol_txrx_soc_handle soc);
	void (*dp_rx_sched_refill_thread)(ol_txrx_soc_handle soc);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
#ifdef QCA_SUPPORT_WDS_EXTENDED
	void (*rx_wds_ext_peer_learn)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
				      uint16_t peer_id, uint8_t vdev_id,
				      uint8_t *peer_macaddr);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
#ifdef WLAN_SUPPORT_MESH_LATENCY
	QDF_STATUS(*peer_update_mesh_latency_params)(
			struct cdp_ctrl_objmgr_psoc *psoc,
			uint8_t vdev_id, uint8_t *peer_mac, uint8_t tid,
			uint32_t service_interval_dl, uint32_t burst_size_dl,
			uint32_t service_interval_ul, uint32_t burst_size_ul,
			uint8_t add_or_sub, uint8_t ac);
#endif
};
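
/*
 * Illustrative sketch only: how the data path might hand the rx_offld hash
 * configuration above to the control path through ol_if_ops, guarding
 * against an unregistered callback. The helper name is hypothetical.
 */
static inline QDF_STATUS
example_send_rx_offld_hash(struct ol_if_ops *ol_ops,
			   struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   uint8_t pdev_id,
			   struct cdp_lro_hash_config *rx_offld_hash)
{
	if (!ol_ops || !ol_ops->lro_hash_config)
		return QDF_STATUS_E_NOSUPPORT;

	return ol_ops->lro_hash_config(ctrl_psoc, pdev_id, rx_offld_hash);
}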
#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
 * @set_wmm_param: set wmm parameters
 * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
 * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
 * @hl_tdls_flag_reset: reset tdls flag for vdev
 * @tx_non_std: Allow the control-path SW to send data frames
 * @get_vdev_id: get vdev id
 * @set_wisa_mode: set wisa mode for a vdev
 * @txrx_data_stall_cb_register: register data stall callback
 * @txrx_data_stall_cb_deregister: deregister data stall callback
 * @txrx_post_data_stall_event: post data stall event
 * @runtime_suspend: ensure TXRX is ready to runtime suspend
 * @runtime_resume: ensure TXRX is ready to runtime resume
 * @get_opmode: get operation mode of vdev
 * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
 * marking first packet after wow wakeup
 * @update_mac_id: update mac_id for vdev
 * @flush_rx_frames: flush rx frames on the queue
 * @get_intra_bss_fwd_pkts_count: get the total tx and rx packets that
 * have been forwarded from the txrx layer without going to upper layers
 * @pkt_log_init: handler to initialize packet log
 * @pkt_log_con_service: handler to connect packet log service
 * @get_num_rx_contexts: handler to get number of RX contexts
 * @register_pktdump_cb: register callback for different pktlog
 * @unregister_pktdump_cb: unregister callback for different pktlog
 * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
 * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
 *
 * @vdev_inform_ll_conn: inform DP to add/delete a latency critical connection
 * for this particular vdev.
 * @set_swlm_enable: Enable or Disable Software Latency Manager.
 * @is_swlm_enabled: Check if Software latency manager is enabled or not.
 * @display_txrx_hw_info: Dump the DP rings info
 *
 * Function pointers for miscellaneous soc/pdev/vdev related operations.
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
						   uint8_t vdev_id,
						   uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id, int enable,
					   int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
						uint8_t pdev_id,
						int level, int tput_thresh,
						int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
				   uint8_t vdev_id, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				 enum ol_tx_spec tx_spec,
				 qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
				     uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
				    uint8_t vdev_id, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
						  uint8_t pdev_id,
						  data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
						    uint8_t pdev_id,
						    data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			struct cdp_soc_t *soc_hdl,
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id);
	QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id);
	int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id, uint8_t value);
	void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      uint8_t mac_id);
	void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				void *peer, bool drop);
	A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint64_t *fwd_tx_packets,
						uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
			     void *scn);
	void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
				    uint8_t pdev_id, void *scn);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
	void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    ol_txrx_pktdump_cb tx_cb,
				    ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id);
	void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id);
	void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
					       uint8_t vdev_id,
					       unsigned long rx_packets,
					       uint32_t time_in_ms,
					       uint32_t high_th,
					       uint32_t low_th);
	void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
					     unsigned long tx_bytes,
					     uint32_t time_in_ms,
					     uint32_t high_th,
					     uint32_t low_th);
	void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
					       uint8_t pdev_id);
	QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
					     uint8_t pdev_id,
					     struct cdp_txrx_ext_stats *req);
	QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id);
	void (*reset_rx_hw_ext_stats)(struct cdp_soc_t *soc_hdl);
	QDF_STATUS (*vdev_inform_ll_conn)(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id,
					  enum vdev_ll_conn_actions action);
	QDF_STATUS (*set_swlm_enable)(struct cdp_soc_t *soc_hdl,
				      uint8_t val);
	uint8_t (*is_swlm_enabled)(struct cdp_soc_t *soc_hdl);
	void (*display_txrx_hw_info)(struct cdp_soc_t *soc_hdl);
};
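
/*
 * Illustrative sketch only: registering a data stall detect callback via
 * the misc ops table. The helper name is hypothetical.
 */
static inline QDF_STATUS
example_register_data_stall_cb(struct cdp_misc_ops *misc_ops,
			       struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       data_stall_detect_cb cb)
{
	if (!misc_ops || !misc_ops->txrx_data_stall_cb_register)
		return QDF_STATUS_E_INVAL;

	return misc_ops->txrx_data_stall_cb_register(soc_hdl, pdev_id, cb);
}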
/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info: set OCB channel info
 * @get_ocb_chan_info: get OCB channel info
 *
 * Function pointers for operations related to OCB.
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				  struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
			struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
};
  1190. /**
  1191. * struct cdp_peer_ops - mcl peer related ops
  1192. * @register_peer:
  1193. * @clear_peer:
  1194. * @find_peer_exist
  1195. * @find_peer_exist_on_vdev
  1196. * @find_peer_exist_on_other_vdev
  1197. * @peer_state_update:
  1198. * @get_vdevid:
  1199. * @register_ocb_peer:
  1200. * @peer_get_peer_mac_addr:
  1201. * @get_peer_state:
  1202. * @update_ibss_add_peer_num_of_vdev:
  1203. * @copy_mac_addr_raw:
  1204. * @add_last_real_peer:
  1205. * @is_vdev_restore_last_peer:
  1206. * @update_last_real_peer:
  1207. */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct qdf_mac_addr peer_addr);
	bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t *peer_addr);
	bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_addr);
	bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t *peer_addr,
		uint16_t max_bssid);
	QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc,
		uint8_t *peer_addr,
		enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
		struct qdf_mac_addr peer_addr);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t vdev_id);
	bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t *peer_mac);
	void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t vdev_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, uint8_t *peer_addr);
	void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac, bool val);
	void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac, bool val);
	void (*peer_flush_frags)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, uint8_t *peer_mac);
};
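/*
 * A minimal usage sketch, not part of the exported API: a converged caller
 * normally reaches this table through the soc-level cdp_ops container
 * declared at the end of this file.  The helper name, the NULL checks and
 * the chosen error code below are illustrative assumptions only.
 */
static inline QDF_STATUS
cdp_example_register_sta_peer(struct cdp_peer_ops *peer_ops,
			      struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      struct ol_txrx_desc_type *sta_desc)
{
	/* Every op is optional; guard against a missing table or entry */
	if (!peer_ops || !peer_ops->register_peer)
		return QDF_STATUS_E_NOSUPPORT;

	/* Dispatch through the function pointer registered by the target DP */
	return peer_ops->register_peer(soc_hdl, pdev_id, sta_desc);
}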
/**
 * struct cdp_mob_stats_ops - mcl mob stats ops
 * @clear_stats: handler to clear ol txrx stats
 * @stats: handler to fill ol txrx stats into a caller-provided buffer
 */
struct cdp_mob_stats_ops {
	QDF_STATUS (*clear_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint8_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned int buf_len);
};
/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: handler to get pn info from peer
 *
 * Function pointers for pmf related operations.
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
		uint8_t vdev_id, uint8_t **last_pn_valid,
		uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
#endif
#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: attach the configuration context with hardcoded parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 *     1 enabled, 0 disabled
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 *     indicate whether mgmt over wmi is enabled,
 *     1 for enabled, 0 for disabled
 * @is_high_latency: check whether the device is a high or low latency device,
 *     1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 *     1 enabled, 0 disabled
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 *     1 enabled, 0 disabled
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
		uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
		void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
	void (*set_tx_compl_tsf64)(bool val);
	bool (*get_tx_compl_tsf64)(void);
};
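/*
 * Illustrative sketch only: the typical call order is to attach a config
 * context first and then program individual flags on the returned handle.
 * The wrapper name and the hard-coded values are assumptions made for the
 * example, not driver defaults.
 */
static inline struct cdp_cfg *
cdp_example_cfg_init(struct cdp_cfg_ops *cfg_ops, qdf_device_t osdev,
		     void *cfg_param)
{
	struct cdp_cfg *cfg_pdev;

	if (!cfg_ops || !cfg_ops->cfg_attach)
		return NULL;

	/* Build the per-pdev configuration context */
	cfg_pdev = cfg_ops->cfg_attach(osdev, cfg_param);
	if (!cfg_pdev)
		return NULL;

	/* Individual knobs are then toggled on the returned handle */
	if (cfg_ops->set_cfg_rx_fwd_disabled)
		cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, 1);
	if (cfg_ops->set_flow_steering)
		cfg_ops->set_flow_steering(cfg_pdev, 1);

	return cfg_pdev;
}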
/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
 * @register_pause_cb: handler to register tx pause callback
 * @set_desc_global_pool_size: handler to set global pool size
 * @dump_flow_pool_info: handler to dump global and flow pool info
 * @tx_desc_thresh_reached: handler to check whether the tx desc threshold
 *     has been reached
 *
 * Function pointers for operations related to flow control
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
		tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
	bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id);
};
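/*
 * Illustrative sketch, not the driver's actual attach sequence: a caller
 * typically registers its tx pause callback once and then maps a descriptor
 * flow pool per vdev.  The helper name and ordering are assumptions.
 */
static inline QDF_STATUS
cdp_example_flowctl_attach(struct cdp_flowctl_ops *fc_ops,
			   struct cdp_soc_t *soc, uint8_t pdev_id,
			   uint8_t vdev_id, tx_pause_callback pause_cb)
{
	QDF_STATUS status;

	if (!fc_ops || !fc_ops->register_pause_cb ||
	    !fc_ops->flow_pool_map_handler)
		return QDF_STATUS_E_NOSUPPORT;

	/* Pause/unpause notifications are delivered through this callback */
	status = fc_ops->register_pause_cb(soc, pause_cb);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Bind a tx descriptor flow pool to the vdev */
	return fc_ops->flow_pool_map_handler(soc, pdev_id, vdev_id);
}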
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare with watermark
 * @ll_set_tx_pause_q_depth: set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, uint32_t chan_freq);
	int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		enum netif_action_type action);
#else
	int (*register_tx_flow_control)(
		struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		bool tx_resume);
	bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct qdf_mac_addr peer_addr,
		unsigned int low_watermark,
		unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
		int pause_q_depth);
	void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		uint32_t reason, uint32_t pause_type);
	void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		uint32_t reason, uint32_t pause_type);
};
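/*
 * Illustrative sketch: pausing and unpausing a vdev around a critical
 * operation.  The helper name is an assumption; the reason and pause_type
 * values are whatever the caller's flow control policy defines, so they are
 * taken as parameters here rather than invented.
 */
static inline void
cdp_example_vdev_pause_window(struct cdp_lflowctl_ops *lfc_ops,
			      struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      uint32_t reason, uint32_t pause_type,
			      void (*critical_section)(void *), void *ctx)
{
	if (!lfc_ops || !lfc_ops->vdev_pause || !lfc_ops->vdev_unpause)
		return;

	/* Stop the queues feeding this vdev ... */
	lfc_ops->vdev_pause(soc_hdl, vdev_id, reason, pause_type);
	/* ... run the caller-supplied work ... */
	if (critical_section)
		critical_section(ctx);
	/* ... and resume transmission with the matching reason */
	lfc_ops->vdev_unpause(soc_hdl, vdev_id, reason, pause_type);
}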
/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, int period,
		uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, int level);
};
#endif /* DP_FLOW_CTL */
#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: get the tx/rx ring resources shared with IPA
 * @ipa_set_doorbell_paddr: set the IPA uC doorbell physical address
 * @ipa_set_active: mark the IPA uC tx or rx path active/inactive
 * @ipa_op_response: handle an IPA uC op-code response message
 * @ipa_register_op_cb: register the IPA uC op-code callback
 * @ipa_deregister_op_cb: deregister the IPA uC op-code callback
 * @ipa_get_stat: request IPA uC statistics
 * @ipa_tx_data_frame: send a data frame on the IPA tx path
 * @ipa_set_uc_tx_partition_base: set the uC tx partition base
 * @ipa_uc_get_share_stats: get the IPA/WLAN shared statistics
 * @ipa_uc_set_quota: set the IPA quota limit in bytes
 * @ipa_enable_autonomy: enable IPA autonomy mode
 * @ipa_disable_autonomy: disable IPA autonomy mode
 * @ipa_setup: set up the IPA WDI pipes
 * @ipa_cleanup: tear down the IPA WDI pipes
 * @ipa_setup_iface: set up an interface with IPA
 * @ipa_cleanup_iface: clean up an interface with IPA
 * @ipa_enable_pipes: enable the IPA tx/rx pipes
 * @ipa_disable_pipes: disable the IPA tx/rx pipes
 * @ipa_set_perf_level: set the IPA performance (bandwidth) level
 * @ipa_rx_intrabss_fwd: perform intra-BSS forwarding for an rx frame
 * @ipa_tx_buf_smmu_mapping: create SMMU mappings for Tx buffers shared
 *     with IPA
 * @ipa_tx_buf_smmu_unmapping: release SMMU mappings for Tx buffers shared
 *     with IPA
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		bool uc_active, bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	void (*ipa_deregister_op_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
		uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		void *ipa_i2w_cb, void *ipa_w2i_cb,
		void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv,
		bool is_rm_enabled, uint32_t *tx_pipe_handle,
		uint32_t *rx_pipe_handle, bool is_smmu_enabled,
		qdf_ipa_sys_connect_params_t *sys_in,
		bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		void *ipa_i2w_cb, void *ipa_w2i_cb,
		void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv,
		bool is_rm_enabled, uint32_t *tx_pipe_handle,
		uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint32_t tx_pipe_handle,
		uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
		qdf_ipa_client_type_t prod_client,
		qdf_ipa_client_type_t cons_client,
		uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_set_perf_level)(int client,
		uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		qdf_nbuf_t nbuf, bool *fwd_success);
	QDF_STATUS (*ipa_tx_buf_smmu_mapping)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_tx_buf_smmu_unmapping)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
};
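/*
 * Illustrative sketch of a bring-up sequence using this table; the host
 * driver's real sequence is more involved (WDI setup, SMMU mapping,
 * metering), so treat this strictly as an example of dispatching the ops.
 * The helper name is an assumption.
 */
static inline QDF_STATUS
cdp_example_ipa_bringup(struct cdp_ipa_ops *ipa_ops,
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	QDF_STATUS status;

	if (!ipa_ops || !ipa_ops->ipa_get_resource ||
	    !ipa_ops->ipa_set_doorbell_paddr || !ipa_ops->ipa_enable_pipes)
		return QDF_STATUS_E_NOSUPPORT;

	/* Learn the tx/rx ring resources that will be shared with IPA */
	status = ipa_ops->ipa_get_resource(soc_hdl, pdev_id);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Program the IPA doorbell address into the target */
	status = ipa_ops->ipa_set_doorbell_paddr(soc_hdl, pdev_id);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* Finally let traffic flow on the offloaded pipes */
	return ipa_ops->ipa_enable_pipes(soc_hdl, pdev_id);
}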
#endif /* IPA_OFFLOAD */
#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointers for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint32_t *queue_delay_microsec,
		uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint16_t *out_packet_count,
		uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint32_t interval);
};
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
};
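/*
 * Illustrative sketch of how a power management path might drive this
 * table: suspend the data path and roll back with a resume if a later
 * stage of the platform suspend fails.  The helper name and the
 * later_stage_failed parameter are assumptions for the example.
 */
static inline QDF_STATUS
cdp_example_bus_suspend(struct cdp_bus_ops *bus_ops,
			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			bool later_stage_failed)
{
	QDF_STATUS status;

	if (!bus_ops || !bus_ops->bus_suspend || !bus_ops->bus_resume)
		return QDF_STATUS_E_NOSUPPORT;

	status = bus_ops->bus_suspend(soc_hdl, pdev_id);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* If something after the data path suspend fails, undo it */
	if (later_stage_failed)
		return bus_ops->bus_resume(soc_hdl, pdev_id);

	return QDF_STATUS_SUCCESS;
}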
#endif /* DP_POWER_SAVE */
#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb: register the rx offload flush callback
 * @deregister_rx_offld_flush_cb: deregister the rx offload flush callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif /* RECEIVE_OFFLOAD */
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: Handler to configure host rx monitor status ring
 * @txrx_get_cfr_rcc: Handler to get CFR mode
 * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode
 * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode
 * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode
 * @txrx_enable_mon_reap_timer: Enable/Disable reap timer of monitor status ring
 */
struct cdp_cfr_ops {
	void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		bool enable,
		struct cdp_monitor_filter *filter_val);
	bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		bool enable);
	void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		struct cdp_cfr_rcc_stats *buf);
	void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		bool enable);
};
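/*
 * Illustrative sketch: enabling CFR capture typically means switching on
 * RCC mode, programming the rx monitor status ring filter and arming the
 * reap timer.  The helper name and ordering are assumptions for the example.
 */
static inline void
cdp_example_cfr_enable(struct cdp_cfr_ops *cfr_ops,
		       struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		       struct cdp_monitor_filter *filter_val)
{
	if (!cfr_ops)
		return;

	if (cfr_ops->txrx_set_cfr_rcc)
		cfr_ops->txrx_set_cfr_rcc(soc_hdl, pdev_id, true);
	if (cfr_ops->txrx_cfr_filter)
		cfr_ops->txrx_cfr_filter(soc_hdl, pdev_id, true, filter_val);
	if (cfr_ops->txrx_enable_mon_reap_timer)
		cfr_ops->txrx_enable_mon_reap_timer(soc_hdl, pdev_id, true);
}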
#endif /* WLAN_CFR_ENABLE && WLAN_ENH_CFR_ENABLE */
#ifdef WLAN_SUPPORT_MSCS
/**
 * struct cdp_mscs_ops - data path ops for MSCS
 * @mscs_peer_lookup_n_get_priority: look up the MSCS peer entry for the
 *     src/dst MAC pair and get the priority to apply to the nbuf
 */
struct cdp_mscs_ops {
	int (*mscs_peer_lookup_n_get_priority)(struct cdp_soc_t *soc,
		uint8_t *src_mac,
		uint8_t *dst_mac,
		qdf_nbuf_t nbuf);
};
#endif /* WLAN_SUPPORT_MSCS */
#ifdef WLAN_SUPPORT_MESH_LATENCY
/**
 * struct cdp_mesh_latency_ops - data path ops for Mesh latency
 * @mesh_latency_update_peer_parameter: update the mesh latency parameters
 *     (service interval, burst size, priority) for a peer
 */
struct cdp_mesh_latency_ops {
	QDF_STATUS (*mesh_latency_update_peer_parameter)(
		struct cdp_soc_t *soc,
		uint8_t *dest_mac, uint32_t service_interval_dl,
		uint32_t burst_size_dl, uint32_t service_interval_ul,
		uint32_t burst_size_ul, uint16_t priority,
		uint8_t add_or_sub);
};
#endif /* WLAN_SUPPORT_MESH_LATENCY */
/*
 * struct cdp_ops - aggregate of the data path ops tables a soc exports;
 * feature-specific tables are present only when the corresponding feature
 * flag is compiled in.
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
	struct cdp_misc_ops *misc_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
	struct cdp_bus_ops *bus_ops;
	struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	struct cdp_cfr_ops *cfr_ops;
#endif
#ifdef WLAN_SUPPORT_MSCS
	struct cdp_mscs_ops *mscs_ops;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
	struct cdp_mesh_latency_ops *mesh_latency_ops;
#endif
};
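#ifdef DP_PEER_EXTENDED_API
/*
 * A minimal dispatch sketch, assuming (as in the companion cdp_txrx_*.h
 * wrappers) that struct cdp_soc_t carries a pointer to this cdp_ops table
 * in a member named ops; that member name and the helper below are
 * illustrative assumptions, not part of this header's contract.
 */
static inline void
cdp_example_display_hw_info(struct cdp_soc_t *soc)
{
	/* Walk the ops container defensively: any table or op may be absent */
	if (!soc || !soc->ops || !soc->ops->misc_ops ||
	    !soc->ops->misc_ops->display_txrx_hw_info)
		return;

	soc->ops->misc_ops->display_txrx_hw_info(soc);
}
#endif /* DP_PEER_EXTENDED_API */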
#endif