  1. /*
  2. * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /**
  20. * @file cdp_txrx_ops.h
  21. * @brief Define the host data path converged API functions
  22. * called by the host control SW and the OS interface module
  23. */
  24. #ifndef _CDP_TXRX_CMN_OPS_H_
  25. #define _CDP_TXRX_CMN_OPS_H_
  26. #include <cdp_txrx_cmn_struct.h>
  27. #include <cdp_txrx_stats_struct.h>
  28. #include "cdp_txrx_handle.h"
  29. #include <cdp_txrx_mon_struct.h>
  30. #include "wlan_objmgr_psoc_obj.h"
  31. #include <wmi_unified_api.h>
  32. #include <wdi_event_api.h>
  33. #ifdef IPA_OFFLOAD
  34. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
  35. defined(CONFIG_IPA_WDI_UNIFIED_API)
  36. #include <qdf_ipa_wdi3.h>
  37. #else
  38. #include <qdf_ipa.h>
  39. #endif
  40. #endif
  41. /**
  42. * bitmap values to indicate special handling of peer_delete
  43. */
  44. #define CDP_PEER_DELETE_NO_SPECIAL 0
  45. #define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1
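/*
 * Editorial sketch (not part of the original header): the two values above
 * are typically used as bit positions in the "bitmap" argument of
 * cdp_cmn_ops::txrx_peer_delete declared below, i.e. passed as (1 << value).
 * "cmn_ops", "soc", "vdev_id" and "peer_mac" are hypothetical locals.
 *
 *	uint32_t bitmap = 1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER;
 *
 *	cmn_ops->txrx_peer_delete(soc, vdev_id, peer_mac, bitmap);
 */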
  46. struct hif_opaque_softc;
  47. /* same as ieee80211_nac_param */
  48. enum cdp_nac_param_cmd {
  49. /* IEEE80211_NAC_PARAM_ADD */
  50. CDP_NAC_PARAM_ADD = 1,
  51. /* IEEE80211_NAC_PARAM_DEL */
  52. CDP_NAC_PARAM_DEL,
  53. /* IEEE80211_NAC_PARAM_LIST */
  54. CDP_NAC_PARAM_LIST,
  55. };
  56. #define CDP_DELBA_INTERVAL_MS 3000
  57. /**
  58. * enum cdp_delba_rcode - CDP reason code for sending DELBA
  59. * @CDP_DELBA_REASON_NONE: None
  60. * @CDP_DELBA_2K_JUMP: Sending DELBA from 2k_jump_handle
  61. */
  62. enum cdp_delba_rcode {
  63. CDP_DELBA_REASON_NONE = 0,
  64. CDP_DELBA_2K_JUMP,
  65. };
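/*
 * Editorial sketch: CDP_DELBA_2K_JUMP is what a 2k-jump handler would pass
 * as the "cdp_rcode" argument of ol_if_ops::send_delba (declared later in
 * this file), alongside the 802.11 reason code chosen by the caller.
 * "if_ops", "psoc", "vdev_id", "peer_mac", "tid" and "reason_code" are
 * hypothetical locals.
 *
 *	if_ops->send_delba(psoc, vdev_id, peer_mac, tid,
 *			   reason_code, CDP_DELBA_2K_JUMP);
 */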
  66. /**
  67. * enum vdev_peer_protocol_enter_exit - whether ingress or egress
  68. * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress
  69. * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress
  70. *
  71. * whether ingress or egress
  72. */
  73. enum vdev_peer_protocol_enter_exit {
  74. CDP_VDEV_PEER_PROTOCOL_IS_INGRESS,
  75. CDP_VDEV_PEER_PROTOCOL_IS_EGRESS
  76. };
  77. /**
  78. * enum vdev_peer_protocol_tx_rx - whether tx or rx
  79. * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx
  80. * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx
  81. *
  82. * whether tx or rx
  83. */
  84. enum vdev_peer_protocol_tx_rx {
  85. CDP_VDEV_PEER_PROTOCOL_IS_TX,
  86. CDP_VDEV_PEER_PROTOCOL_IS_RX
  87. };
  88. /**
  89. * enum vdev_ll_conn_actions - Actions to inform vdev about
  90. * low latency connection.
  91. * @CDP_VDEV_LL_CONN_ADD: Add Low latency connection
  92. * @CDP_VDEV_LL_CONN_DEL: Delete Low latency connection
  93. */
  94. enum vdev_ll_conn_actions {
  95. CDP_VDEV_LL_CONN_ADD,
  96. CDP_VDEV_LL_CONN_DEL
  97. };
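/*
 * Editorial sketch: these actions are the "action" argument of
 * cdp_misc_ops::vdev_inform_ll_conn (declared later in this file), which
 * tells DP that a latency-critical connection was added or removed on a
 * vdev. "misc_ops", "soc" and "vdev_id" are hypothetical locals.
 *
 *	misc_ops->vdev_inform_ll_conn(soc, vdev_id, CDP_VDEV_LL_CONN_ADD);
 *	...
 *	misc_ops->vdev_inform_ll_conn(soc, vdev_id, CDP_VDEV_LL_CONN_DEL);
 */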
  98. /******************************************************************************
  99. *
  100. * Control Interface (A Interface)
  101. *
  102. *****************************************************************************/
  103. struct cdp_cmn_ops {
  104. QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
  105. int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id);
  106. QDF_STATUS (*txrx_vdev_attach)
  107. (struct cdp_soc_t *soc, uint8_t pdev_id,
  108. struct cdp_vdev_info *vdev_info);
  109. QDF_STATUS
  110. (*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
  111. ol_txrx_vdev_delete_cb callback,
  112. void *cb_context);
  113. QDF_STATUS (*txrx_pdev_attach)
  114. (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
  115. qdf_device_t osdev, uint8_t pdev_id);
  116. int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
  117. void
  118. (*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
  119. int force);
  120. QDF_STATUS
  121. (*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
  122. int force);
  123. /**
  124. * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
  125. * @soc: soc dp handle
  126. * @pdev_id: id of Dp pdev handle
  127. * @force: Force deinit or not
  128. *
  129. * Return: QDF_STATUS
  130. */
  131. QDF_STATUS
  132. (*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
  133. int force);
  134. QDF_STATUS
  135. (*txrx_peer_create)
  136. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  137. uint8_t *peer_mac_addr, enum cdp_peer_type peer_type);
  138. QDF_STATUS
  139. (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  140. uint8_t *peer_mac,
  141. struct cdp_peer_setup_info *setup_info);
  142. QDF_STATUS
  143. (*txrx_cp_peer_del_response)
  144. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  145. uint8_t *peer_mac_addr);
  146. QDF_STATUS
  147. (*txrx_peer_teardown)
  148. (struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
  149. int (*txrx_peer_add_ast)
  150. (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
  151. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  152. uint32_t flags);
  153. int (*txrx_peer_update_ast)
  154. (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
  155. uint8_t *mac_addr, uint32_t flags);
  156. bool (*txrx_peer_get_ast_info_by_soc)
  157. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  158. struct cdp_ast_entry_info *ast_entry_info);
  159. bool (*txrx_peer_get_ast_info_by_pdev)
  160. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  161. uint8_t pdev_id,
  162. struct cdp_ast_entry_info *ast_entry_info);
  163. QDF_STATUS (*txrx_peer_ast_delete_by_soc)
  164. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  165. txrx_ast_free_cb callback,
  166. void *cookie);
  167. QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
  168. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  169. uint8_t pdev_id,
  170. txrx_ast_free_cb callback,
  171. void *cookie);
  172. QDF_STATUS
  173. (*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
  174. uint8_t *peer_mac, uint32_t bitmap);
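/*
 * Editorial sketch of the usual peer life cycle through the ops above,
 * assuming create and setup happen at association time and teardown and
 * delete at disassociation. "ops", "soc", "vdev_id" and "peer_mac" are
 * hypothetical locals; "peer_type", "setup_info" and "bitmap" stand in for
 * an enum cdp_peer_type value, a struct cdp_peer_setup_info pointer and a
 * CDP_PEER_DELETE_* bitmap respectively.
 *
 *	ops->txrx_peer_create(soc, vdev_id, peer_mac, peer_type);
 *	ops->txrx_peer_setup(soc, vdev_id, peer_mac, setup_info);
 *	...
 *	ops->txrx_peer_teardown(soc, vdev_id, peer_mac);
 *	ops->txrx_peer_delete(soc, vdev_id, peer_mac, bitmap);
 */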
  175. QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
  176. uint8_t vdev_id,
  177. uint8_t smart_monitor);
  178. void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
  179. uint8_t *peer_mac,
  180. QDF_STATUS(*delete_cb)(
  181. uint8_t vdev_id,
  182. uint32_t peerid_cnt,
  183. uint16_t *peerid_list),
  184. uint32_t bitmap);
  185. void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
  186. uint8_t pdev_id,
  187. ol_txrx_peer_unmap_sync_cb
  188. peer_unmap_sync);
  189. QDF_STATUS
  190. (*txrx_get_peer_mac_from_peer_id)
  191. (struct cdp_soc_t *cdp_soc,
  192. uint32_t peer_id, uint8_t *peer_mac);
  193. void
  194. (*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
  195. void
  196. (*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
  197. QDF_STATUS
  198. (*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
  199. struct cdp_dev_stats *stats, uint8_t type);
  200. QDF_STATUS
  201. (*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
  202. u_int8_t *mem_status,
  203. u_int8_t *user_position);
  204. uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
  205. uint8_t pdev_id);
  206. QDF_STATUS
  207. (*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
  208. int force);
  209. QDF_STATUS
  210. (*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
  211. uint32_t chan_mhz);
  212. QDF_STATUS
  213. (*txrx_set_privacy_filters)
  214. (struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
  215. uint32_t num);
  216. uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);
  217. /********************************************************************
  218. * Data Interface (B Interface)
  219. ********************************************************************/
  220. QDF_STATUS
  221. (*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
  222. ol_osif_vdev_handle osif_vdev,
  223. struct ol_txrx_ops *txrx_ops);
  224. int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
  225. qdf_nbuf_t tx_mgmt_frm, uint8_t type);
  226. int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
  227. qdf_nbuf_t tx_mgmt_frm, uint8_t type,
  228. uint8_t use_6mbps, uint16_t chanfreq);
  229. /**
  230. * ol_txrx_mgmt_tx_cb - tx management delivery notification
  231. * callback function
  232. */
  233. QDF_STATUS
  234. (*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
  235. uint8_t type,
  236. ol_txrx_mgmt_tx_cb download_cb,
  237. ol_txrx_mgmt_tx_cb ota_ack_cb,
  238. void *ctxt);
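/*
 * Editorial sketch: a control-path client first registers its download and
 * OTA-ack callbacks for a management frame "type" and then hands frames of
 * that type to txrx_mgmt_send(). "ops", "soc", "pdev_id", "vdev_id",
 * "my_download_cb", "my_ota_ack_cb", "ctx", "frm" and "type" are
 * hypothetical.
 *
 *	ops->txrx_mgmt_tx_cb_set(soc, pdev_id, type,
 *				 my_download_cb, my_ota_ack_cb, ctx);
 *	ops->txrx_mgmt_send(soc, vdev_id, frm, type);
 */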
  239. /**
  240. * ol_txrx_data_tx_cb - Function registered with the data path
  241. * that is called when tx frames marked as "no free" are
  242. * done being transmitted
  243. */
  244. void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
  245. ol_txrx_data_tx_cb callback, void *ctxt);
  246. qdf_nbuf_t (*tx_send_exc)
  247. (ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list,
  248. struct cdp_tx_exception_metadata *tx_exc_metadata);
  249. /*******************************************************************
  250. * Statistics and Debugging Interface (C Interface)
  251. ********************************************************************/
  252. int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
  253. int max_subfrms_ampdu,
  254. int max_subfrms_amsdu);
  255. A_STATUS
  256. (*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
  257. struct ol_txrx_stats_req *req,
  258. bool per_vdev, bool response_expected);
  259. int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
  260. int debug_specs);
  261. QDF_STATUS
  262. (*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
  263. uint8_t cfg_stats_type, uint32_t cfg_val);
  264. void (*txrx_print_level_set)(unsigned level);
  265. /**
  266. * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
  267. * @soc: datapath soc handle
  268. * @vdev_id: vdev id
  269. *
  270. * Return: vdev mac address
  271. */
  272. uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
  273. uint8_t vdev_id);
  274. /**
  275. * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
  276. * @soc: datapath soc handle
  277. * @vdev_id: vdev id
  278. *
  279. * Return: Handle to control pdev
  280. */
  281. struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
  282. uint8_t vdev_id);
  283. /**
  284. * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
  285. * @soc: datapath soc handle
  286. * @pdev_id: pdev id
  287. *
  288. * Return: vdev_id
  289. */
  290. uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
  291. uint8_t pdev_id);
  292. void (*txrx_soc_detach)(struct cdp_soc_t *soc);
  293. /**
  294. * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
  295. * @soc: Opaque Dp handle
  296. *
  297. * Return: None
  298. */
  299. void (*txrx_soc_deinit)(struct cdp_soc_t *soc);
  300. /**
  301. * txrx_soc_init() - Initialize dp soc and dp ring memory
  302. * @soc: Opaque Dp handle
  303. * @ctrl_psoc: Opaque objmgr psoc (CP) handle
  304. * @hif_handle: Opaque HIF handle
  305. * @htc_handle: Opaque HTC handle
  306. *
  307. * Return: Opaque DP SoC handle
  308. */
  309. void *(*txrx_soc_init)(struct cdp_soc_t *soc,
  310. struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  311. struct hif_opaque_softc *hif_handle,
  312. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
  313. struct ol_if_ops *ol_ops, uint16_t device_id);
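/*
 * Editorial sketch: txrx_soc_init() takes the control psoc, the HIF/HTC
 * handles, the QDF OS device, the control-path ol_if_ops table and the
 * device id, and returns the (re)initialized DP SoC handle. All arguments
 * below are hypothetical locals.
 *
 *	void *dp_soc = ops->txrx_soc_init(soc, ctrl_psoc, hif_handle,
 *					  htc_handle, qdf_osdev,
 *					  ol_ops, device_id);
 */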
  314. QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
  315. HTC_HANDLE htc_handle,
  316. qdf_device_t qdf_osdev,
  317. uint8_t pdev_id);
  318. /**
  319. * txrx_tso_soc_attach() - TSO attach handler triggered during
  320. * dynamic tso activation
  321. * @soc: Opaque Dp handle
  322. *
  323. * Return: QDF status
  324. */
  325. QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);
  326. /**
  327. * txrx_tso_soc_detach() - TSO detach handler triggered during
  328. * dynamic tso de-activation
  329. * @soc: Opaque Dp handle
  330. *
  331. * Return: QDF status
  332. */
  333. QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
  334. int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
  335. uint8_t *peer_mac,
  336. uint16_t vdev_id, uint8_t tid,
  337. int status);
  338. int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
  339. uint8_t *peer_mac,
  340. uint16_t vdev_id,
  341. uint8_t dialogtoken,
  342. uint16_t tid, uint16_t batimeout,
  343. uint16_t buffersize,
  344. uint16_t startseqnum);
  345. QDF_STATUS
  346. (*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
  347. uint8_t *peer_mac,
  348. uint16_t vdev_id, uint8_t tid,
  349. uint8_t *dialogtoken, uint16_t *statuscode,
  350. uint16_t *buffersize, uint16_t *batimeout);
  351. int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  352. uint16_t vdev_id, int tid, uint16_t reasoncode);
  353. /**
  354. * delba_tx_completion() - Indicate delba tx status
  355. * @cdp_soc: soc handle
  356. * @peer_mac: Peer mac address
  357. * @vdev_id: vdev id
  358. * @tid: Tid number
  359. * @status: Tx completion status
  360. *
  361. * Return: 0 on Success, 1 on failure
  362. */
  363. int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  364. uint16_t vdev_id,
  365. uint8_t tid, int status);
  366. QDF_STATUS
  367. (*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  368. uint16_t vdev_id, uint8_t tid,
  369. uint16_t statuscode);
  370. QDF_STATUS
  371. (*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
  372. uint8_t vdev_id, uint8_t map_id);
  373. int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
  374. void (*flush_cache_rx_queue)(void);
  375. QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
  376. uint8_t pdev_id,
  377. uint8_t map_id,
  378. uint8_t tos, uint8_t tid);
  379. QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
  380. uint8_t vdev_id,
  381. struct cdp_txrx_stats_req *req);
  382. QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
  383. enum qdf_stats_verbosity_level level);
  384. QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
  385. void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
  386. QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
  387. uint8_t vdev_id, uint8_t *peermac,
  388. enum cdp_sec_type sec_type,
  389. uint32_t *rx_pn);
  390. QDF_STATUS(*set_key_sec_type)(struct cdp_soc_t *soc_handle,
  391. uint8_t vdev_id, uint8_t *peermac,
  392. enum cdp_sec_type sec_type,
  393. bool is_unicast);
  394. QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
  395. struct cdp_config_params *params);
  396. void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
  397. void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
  398. void *dp_hdl);
  399. void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
  400. uint8_t vdev_id);
  401. QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
  402. uint8_t vdev_id,
  403. uint16_t size);
  404. void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
  405. void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
  406. void *dp_txrx_handle);
  407. QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
  408. uint32_t lmac_id);
  409. QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc,
  410. uint8_t pdev_id, uint32_t lmac_id);
  411. QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
  412. uint8_t pdev_id, bool is_pdev_down);
  413. QDF_STATUS (*txrx_peer_reset_ast)
  414. (ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
  415. uint8_t *peer_macaddr, uint8_t vdev_id);
  416. QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
  417. uint8_t vdev_id);
  418. void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
  419. void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
  420. uint8_t ac, uint32_t value);
  421. void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
  422. uint8_t ac, uint32_t *value);
  423. QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
  424. uint32_t num_peers,
  425. uint32_t max_ast_index,
  426. uint8_t peer_map_unmap_v);
  427. QDF_STATUS (*set_soc_param)(ol_txrx_soc_handle soc,
  428. enum cdp_soc_param_t param,
  429. uint32_t value);
  430. ol_txrx_tx_fp tx_send;
  431. /**
  432. * txrx_get_os_rx_handles_from_vdev() - Return the rx handler and osif
  433. * vdev used to deliver packets to the stack
  434. * @soc: datapath soc handle
  435. * @vdev_id: vdev id
  436. * @stack_fn: [out] function pointer used to deliver RX packets to the stack
  437. * @osif_vdev: [out] osif vdev handle that RX packets are delivered to
  438. */
  439. void (*txrx_get_os_rx_handles_from_vdev)
  440. (ol_txrx_soc_handle soc,
  441. uint8_t vdev_id,
  442. ol_txrx_rx_fp *stack_fn,
  443. ol_osif_vdev_handle *osif_vdev);
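/*
 * Editorial sketch: an RX fast path can look the stack delivery handles up
 * once and call the function directly afterwards. This assumes
 * ol_txrx_rx_fp takes the osif vdev handle and an nbuf list (see the
 * typedef in the common CDP headers); "ops", "soc", "vdev_id" and
 * "nbuf_list" are hypothetical locals.
 *
 *	ol_txrx_rx_fp stack_fn;
 *	ol_osif_vdev_handle osif_vdev;
 *
 *	ops->txrx_get_os_rx_handles_from_vdev(soc, vdev_id,
 *					      &stack_fn, &osif_vdev);
 *	if (stack_fn)
 *		stack_fn(osif_vdev, nbuf_list);
 */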
  444. void (*set_rate_stats_ctx)(struct cdp_soc_t *soc,
  445. void *ctx);
  446. int (*txrx_classify_update)
  447. (struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
  448. enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
  449. bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
  450. enum cdp_capabilities dp_caps);
  451. void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
  452. QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
  453. uint8_t pdev_id,
  454. void *buf);
  455. void* (*txrx_peer_get_rdkstats_ctx)(struct cdp_soc_t *soc,
  456. uint8_t vdev_id,
  457. uint8_t *mac_addr);
  458. QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
  459. uint8_t pdev_id);
  460. QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
  461. uint8_t pdev_id,
  462. uint8_t pcp, uint8_t tid);
  463. QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
  464. uint8_t vdev_id,
  465. uint8_t pcp, uint8_t tid);
  466. #ifdef QCA_MULTIPASS_SUPPORT
  467. QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
  468. uint16_t vlan_id, uint16_t group_key);
  469. #endif
  470. uint16_t (*get_peer_mac_list)
  471. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  472. u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt,
  473. bool limit);
  474. #ifdef QCA_SUPPORT_WDS_EXTENDED
  475. uint16_t (*get_wds_ext_peer_id)(ol_txrx_soc_handle soc,
  476. uint8_t vdev_id,
  477. uint8_t *mac);
  478. QDF_STATUS (*set_wds_ext_peer_rx)(ol_txrx_soc_handle soc,
  479. uint8_t vdev_id,
  480. uint8_t *mac,
  481. ol_txrx_rx_fp rx,
  482. ol_osif_peer_handle osif_peer);
  483. #endif /* QCA_SUPPORT_WDS_EXTENDED */
  484. void (*txrx_drain)(ol_txrx_soc_handle soc);
  485. int (*get_free_desc_poolsize)(struct cdp_soc_t *soc);
  486. #ifdef WLAN_SYSFS_DP_STATS
  487. QDF_STATUS (*txrx_sysfs_fill_stats)(ol_txrx_soc_handle soc,
  488. char *buf, uint32_t buf_size);
  489. QDF_STATUS (*txrx_sysfs_set_stat_type)(ol_txrx_soc_handle soc,
  490. uint32_t stat_type,
  491. uint32_t mac_id);
  492. #endif /* WLAN_SYSFS_DP_STATS */
  493. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  494. void (*set_pkt_capture_mode)(struct cdp_soc_t *soc, bool val);
  495. #endif
  496. };
  497. struct cdp_ctrl_ops {
  498. int
  499. (*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc);
  500. int
  501. (*txrx_update_filter_neighbour_peers)(
  502. struct cdp_soc_t *soc, uint8_t vdev_id,
  503. uint32_t cmd, uint8_t *macaddr);
  504. /* Is this similar to ol_txrx_peer_state_update() in MCL */
  505. /**
  506. * @brief Update the authorize peer object at association time
  507. * @details
  508. * For the host-based implementation of rate-control, it
  509. * updates the peer/node-related parameters within rate-control
  510. * context of the peer at association.
  511. *
  512. * @param soc_hdl - pointer to the soc object
  513. * @param vdev_id - id of the virtual device (vdev)
  514. * @param peer_mac - mac address of the peer node
  515. * @param authorize - whether to authorize or unauthorize the peer
  516. *
  517. * @return QDF_STATUS
  518. */
  519. QDF_STATUS
  520. (*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl,
  521. uint8_t vdev_id,
  522. uint8_t *peer_mac,
  523. u_int32_t authorize);
  524. bool
  525. (*txrx_peer_get_authorize)(struct cdp_soc_t *soc_hdl,
  526. uint8_t vdev_id,
  527. uint8_t *peer_mac);
  528. void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id);
  529. int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl);
  530. QDF_STATUS
  531. (*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id,
  532. enum cdp_vdev_param_type param,
  533. cdp_config_param_type val);
  534. /**
  535. * @brief Set the reo dest ring num of the radio
  536. * @details
  537. * Set the reo destination ring no on which we will receive
  538. * pkts for this radio.
  539. *
  540. * @param txrx_soc - soc handle
  541. * @param pdev_id - id of physical device
  542. * @param reo_dest_ring_num - value ranges between 1 - 4
  543. * @return QDF_STATUS
  544. */
  545. QDF_STATUS (*txrx_set_pdev_reo_dest)(
  546. struct cdp_soc_t *txrx_soc,
  547. uint8_t pdev_id,
  548. enum cdp_host_reo_dest_ring reo_dest_ring_num);
  549. /**
  550. * @brief Get the reo dest ring num of the radio
  551. * @details
  552. * Get the reo destination ring no on which we will receive
  553. * pkts for this radio.
  554. *
  555. * @param txrx_soc - soc handle
  556. * @param pdev_id - id of physical device
  557. * @return the reo destination ring number
  558. */
  559. enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
  560. struct cdp_soc_t *txrx_soc,
  561. uint8_t pdev_id);
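/*
 * Editorial sketch: the setter and getter above are symmetric, with the
 * ring expressed as an enum cdp_host_reo_dest_ring value from the common
 * CDP headers. "ctrl_ops", "soc", "pdev_id" and "ring" are hypothetical
 * locals.
 *
 *	ctrl_ops->txrx_set_pdev_reo_dest(soc, pdev_id, ring);
 *	ring = ctrl_ops->txrx_get_pdev_reo_dest(soc, pdev_id);
 */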
  562. int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id,
  563. wdi_event_subscribe *event_cb_sub,
  564. uint32_t event);
  565. int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id,
  566. wdi_event_subscribe *event_cb_sub,
  567. uint32_t event);
  568. int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id,
  569. uint8_t *peer_mac, uint8_t sec_idx);
  570. QDF_STATUS
  571. (*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc,
  572. uint8_t vdev_id,
  573. uint8_t subtype, uint8_t tx_power);
  574. /**
  575. * txrx_set_pdev_param() - callback to set pdev parameter
  576. * @soc: opaque soc handle
  577. * @pdev_id: id of data path pdev handle
  578. * @type: pdev parameter type to set
  579. * @val: value of the pdev parameter
  580. * Return: QDF_STATUS
  581. */
  582. QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc,
  583. uint8_t pdev_id,
  584. enum cdp_pdev_param_type type,
  585. cdp_config_param_type val);
  586. QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc,
  587. uint8_t pdev_id,
  588. enum cdp_pdev_param_type type,
  589. cdp_config_param_type *val);
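/*
 * Editorial sketch: pdev parameters travel through the cdp_config_param_type
 * union from the common CDP headers; the union member below is hypothetical
 * and only illustrates the pattern. "ctrl_ops", "soc", "pdev_id" and
 * "param_type" (an enum cdp_pdev_param_type value) are hypothetical locals.
 *
 *	cdp_config_param_type val;
 *
 *	val.cdp_pdev_param_foo = 1;	(hypothetical union member)
 *	ctrl_ops->txrx_set_pdev_param(soc, pdev_id, param_type, val);
 *	ctrl_ops->txrx_get_pdev_param(soc, pdev_id, param_type, &val);
 */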
  590. QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc,
  591. uint8_t vdev_id, uint8_t *peer_mac,
  592. enum cdp_peer_param_type param,
  593. cdp_config_param_type val);
  594. QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc,
  595. uint8_t vdev_id, uint8_t *peer_mac,
  596. enum cdp_peer_param_type param,
  597. cdp_config_param_type *val);
  598. void * (*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id);
  599. void (*txrx_peer_flush_frags)(struct cdp_soc_t *soc, uint8_t vdev_id,
  600. uint8_t *peer_mac);
  601. #ifdef VDEV_PEER_PROTOCOL_COUNT
  602. void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc,
  603. int8_t vdev_id,
  604. qdf_nbuf_t nbuf,
  605. bool is_egress,
  606. bool is_rx);
  607. #endif
  608. #ifdef ATH_SUPPORT_NAC_RSSI
  609. QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc,
  610. uint8_t vdev_id,
  611. enum cdp_nac_param_cmd cmd,
  612. char *bssid,
  613. char *client_macaddr,
  614. uint8_t chan_num);
  615. QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc,
  616. uint8_t vdev_id,
  617. char *macaddr,
  618. uint8_t *rssi);
  619. #endif
  620. #ifdef WLAN_SUPPORT_SCS
  621. QDF_STATUS
  622. (*txrx_enable_scs_params) (
  623. struct cdp_soc_t *soc, struct qdf_mac_addr
  624. *macaddr,
  625. uint8_t vdev_id,
  626. bool is_active);
  627. QDF_STATUS
  628. (*txrx_record_scs_params) (
  629. struct cdp_soc_t *soc, struct qdf_mac_addr
  630. *macaddr,
  631. uint8_t vdev_id,
  632. struct cdp_scs_params *scs_params,
  633. uint8_t entry_ctr,
  634. uint8_t scs_sessions);
  635. #endif
  636. #ifdef WLAN_SUPPORT_MSCS
  637. QDF_STATUS
  638. (*txrx_record_mscs_params) (
  639. struct cdp_soc_t *soc, uint8_t *macaddr,
  640. uint8_t vdev_id,
  641. struct cdp_mscs_params *mscs_params,
  642. bool active);
  643. #endif
  644. QDF_STATUS
  645. (*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac,
  646. bool is_unicast, uint32_t *key);
  647. QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc,
  648. uint8_t vdev_id,
  649. enum cdp_vdev_param_type param,
  650. cdp_config_param_type *val);
  651. int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc,
  652. uint8_t pdev_id,
  653. uint8_t *macaddr, uint8_t enb_dsb);
  654. QDF_STATUS
  655. (*calculate_delay_stats)(struct cdp_soc_t *cdp_soc,
  656. uint8_t vdev_id, qdf_nbuf_t nbuf);
  657. #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
  658. QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
  659. struct cdp_soc_t *soc, uint8_t pdev_id,
  660. uint32_t protocol_mask, uint16_t protocol_type,
  661. uint16_t tag);
  662. #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
  663. void (*txrx_dump_pdev_rx_protocol_tag_stats)(
  664. struct cdp_soc_t *soc, uint8_t pdev_id,
  665. uint16_t protocol_type);
  666. #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
  667. #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
  668. #ifdef WLAN_SUPPORT_RX_FLOW_TAG
  669. QDF_STATUS (*txrx_set_rx_flow_tag)(
  670. struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
  671. struct cdp_rx_flow_info *flow_info);
  672. QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
  673. struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
  674. struct cdp_rx_flow_info *flow_info);
  675. #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
  676. #ifdef QCA_MULTIPASS_SUPPORT
  677. void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc,
  678. uint8_t vdev_id, uint8_t *peer_mac,
  679. uint16_t vlan_id);
  680. #endif
  681. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
  682. QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
  683. ol_txrx_soc_handle soc, uint8_t pdev_id,
  684. bool is_rx_pkt_cap_enable, uint8_t is_tx_pkt_cap_enable,
  685. uint8_t *peer_mac);
  686. #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
  687. QDF_STATUS
  688. (*txrx_set_psoc_param)(struct cdp_soc_t *soc,
  689. enum cdp_psoc_param_type param,
  690. cdp_config_param_type val);
  691. QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc,
  692. enum cdp_psoc_param_type type,
  693. cdp_config_param_type *val);
  694. #ifdef VDEV_PEER_PROTOCOL_COUNT
  695. /*
  696. * Enable per-peer protocol counters
  697. */
  698. void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc,
  699. int8_t vdev_id, bool enable);
  700. void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
  701. int8_t vdev_id, int mask);
  702. int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc,
  703. int8_t vdev_id);
  704. int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
  705. int8_t vdev_id);
  706. #endif
  707. #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
  708. void (*txrx_set_delta_tsf)(struct cdp_soc_t *soc, uint8_t vdev_id,
  709. uint32_t delta_tsf);
  710. QDF_STATUS (*txrx_set_tsf_ul_delay_report)(struct cdp_soc_t *soc,
  711. uint8_t vdev_id,
  712. bool enable);
  713. QDF_STATUS (*txrx_get_uplink_delay)(struct cdp_soc_t *soc,
  714. uint8_t vdev_id,
  715. uint32_t *val);
  716. #endif
  717. };
  718. struct cdp_me_ops {
  719. void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc,
  720. uint8_t pdev_id);
  721. void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id);
  722. uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id,
  723. qdf_nbuf_t wbuf, u_int8_t newmac[][6],
  724. uint8_t newmaccnt, uint8_t tid,
  725. bool is_igmp);
  726. };
  727. struct cdp_mon_ops {
  728. QDF_STATUS (*txrx_reset_monitor_mode)
  729. (ol_txrx_soc_handle soc, uint8_t pdev_id, u_int8_t smart_monitor);
  730. QDF_STATUS (*txrx_deliver_tx_mgmt)
  731. (struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf);
  732. /* HK advance monitor filter support */
  733. QDF_STATUS (*txrx_set_advance_monitor_filter)
  734. (struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  735. struct cdp_monitor_filter *filter_val);
  736. /* Configure full monitor mode */
  737. QDF_STATUS
  738. (*config_full_mon_mode)(struct cdp_soc_t *soc, uint8_t val);
  739. QDF_STATUS (*soc_config_full_mon_mode)(struct cdp_pdev *cdp_pdev,
  740. uint8_t val);
  741. };
  742. struct cdp_host_stats_ops {
  743. int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
  744. struct ol_txrx_stats_req *req);
  745. QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
  746. uint8_t vdev_id);
  747. QDF_STATUS
  748. (*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  749. int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
  750. struct cdp_stats_extd *buf);
  751. /**
  752. * @brief Enable enhanced stats functionality.
  753. *
  754. * @param soc - the soc handle
  755. * @param pdev_id - pdev_id of pdev
  756. * @return - QDF_STATUS
  757. */
  758. QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
  759. uint8_t pdev_id);
  760. /**
  761. * @brief Disable enhanced stats functionality.
  762. *
  763. * @param soc - the soc handle
  764. * @param pdev_id - pdev_id of pdev
  765. * @return - QDF_STATUS
  766. */
  767. QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
  768. uint8_t pdev_id);
  769. QDF_STATUS
  770. (*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  771. QDF_STATUS
  772. (*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  773. QDF_STATUS
  774. (*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  775. QDF_STATUS
  776. (*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  777. QDF_STATUS
  778. (*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  779. QDF_STATUS
  780. (*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  781. QDF_STATUS
  782. (*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  783. QDF_STATUS
  784. (*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
  785. int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  786. struct ol_txrx_stats_req *req);
  787. int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
  788. uint8_t pdev_id,
  789. uint8_t *addr, void *stats,
  790. uint32_t last_tx_rate_mcs,
  791. uint32_t stats_id);
  792. QDF_STATUS
  793. (*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  794. uint8_t *addr,
  795. uint32_t cap, uint32_t copy_stats);
  796. QDF_STATUS
  797. (*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  798. void *data,
  799. uint32_t data_len);
  800. QDF_STATUS
  801. (*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
  802. uint8_t pdev_id, void *data,
  803. uint16_t stats_id);
  804. QDF_STATUS
  805. (*txrx_get_peer_stats_param)(struct cdp_soc_t *soc,
  806. uint8_t vdev_id,
  807. uint8_t *peer_mac,
  808. enum cdp_peer_stats_type type,
  809. cdp_peer_stats_param_t *buf);
  810. QDF_STATUS
  811. (*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  812. uint8_t *peer_mac,
  813. struct cdp_peer_stats *peer_stats);
  814. QDF_STATUS
  815. (*txrx_get_soc_stats)(struct cdp_soc_t *soc,
  816. struct cdp_soc_stats *soc_stats);
  817. QDF_STATUS
  818. (*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
  819. uint8_t vdev_id,
  820. uint8_t *peer_mac);
  821. QDF_STATUS
  822. (*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
  823. uint8_t vdev_id, uint8_t *peer_mac);
  824. int
  825. (*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  826. void *buf, bool is_aggregate);
  827. int
  828. (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
  829. void *data, uint32_t len,
  830. uint32_t stats_id);
  831. int
  832. (*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
  833. uint8_t vdev_id,
  834. wmi_host_vdev_extd_stats *buffer);
  835. QDF_STATUS
  836. (*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
  837. uint8_t vdev_id, void *buf,
  838. uint16_t stats_id);
  839. int
  840. (*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  841. void *buf);
  842. QDF_STATUS
  843. (*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  844. struct cdp_pdev_stats *buf);
  845. int
  846. (*txrx_get_ratekbps)(int preamb, int mcs,
  847. int htflag, int gintval);
  848. QDF_STATUS
  849. (*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  850. uint8_t *peer_mac, void *stats,
  851. uint32_t last_tx_rate_mcs,
  852. uint32_t stats_id);
  853. #ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
  854. QDF_STATUS
  855. (*txrx_get_scan_spcl_vap_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  856. struct cdp_scan_spcl_vap_stats *stats);
  857. #endif
  858. };
  859. struct cdp_wds_ops {
  860. QDF_STATUS
  861. (*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
  862. u_int32_t val);
  863. QDF_STATUS
  864. (*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
  865. uint8_t vdev_id, uint8_t *peer_mac,
  866. int wds_tx_ucast, int wds_tx_mcast);
  867. int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
  868. uint32_t val);
  869. };
  870. struct cdp_raw_ops {
  871. QDF_STATUS
  872. (*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
  873. qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
  874. };
  875. #ifdef PEER_FLOW_CONTROL
  876. struct cdp_pflow_ops {
  877. uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
  878. uint8_t pdev_id,
  879. enum _dp_param_t,
  880. uint32_t, void *);
  881. };
  882. #endif /* PEER_FLOW_CONTROL */
  883. #define LRO_IPV4_SEED_ARR_SZ 5
  884. #define LRO_IPV6_SEED_ARR_SZ 11
  885. /**
  886. * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
  887. * @lro_enable: indicates whether rx_offld is enabled
  888. * @tcp_flag: If the TCP flags from the packet do not match
  889. * the values in this field after masking with TCP flags mask
  890. * below, packet is not rx_offld eligible
  891. * @tcp_flag_mask: field for comparing the TCP values provided
  892. * above with the TCP flags field in the received packet
  893. * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
  894. * 5-tuple toeplitz hash for ipv4 packets
  895. * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
  896. * 5-tuple toeplitz hash for ipv6 packets
  897. */
  898. struct cdp_lro_hash_config {
  899. uint32_t lro_enable;
  900. uint32_t tcp_flag:9,
  901. tcp_flag_mask:9;
  902. uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
  903. uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
  904. };
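/*
 * Editorial sketch: the data path fills this structure and hands it to
 * ol_if_ops::lro_hash_config (declared below) so the control path can
 * program the RX offload flow hash in the target. qdf_get_random_bytes()
 * is assumed here for seeding; "if_ops", "psoc" and "pdev_id" are
 * hypothetical locals.
 *
 *	struct cdp_lro_hash_config cfg = {0};
 *
 *	cfg.lro_enable = 1;
 *	qdf_get_random_bytes(cfg.toeplitz_hash_ipv4,
 *			     sizeof(cfg.toeplitz_hash_ipv4));
 *	qdf_get_random_bytes(cfg.toeplitz_hash_ipv6,
 *			     sizeof(cfg.toeplitz_hash_ipv6));
 *	if_ops->lro_hash_config(psoc, pdev_id, &cfg);
 */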
  905. struct ol_if_ops {
  906. void
  907. (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  908. uint8_t pdev_id, uint8_t *peer_macaddr,
  909. uint8_t vdev_id,
  910. bool hash_based, uint8_t ring_num);
  911. QDF_STATUS
  912. (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  913. uint8_t pdev_id,
  914. uint8_t vdev_id, uint8_t *peer_mac,
  915. qdf_dma_addr_t hw_qdesc, int tid,
  916. uint16_t queue_num,
  917. uint8_t ba_window_size_valid,
  918. uint16_t ba_window_size);
  919. QDF_STATUS
  920. (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  921. uint8_t pdev_id,
  922. uint8_t vdev_id, uint8_t *peer_macaddr,
  923. uint32_t tid_mask);
  924. int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
  925. uint8_t pdev_id,
  926. uint8_t *peer_mac,
  927. uint8_t *vdev_mac, enum wlan_op_mode opmode);
  928. bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
  929. int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  930. uint8_t vdev_id,
  931. uint8_t *peer_macaddr,
  932. uint16_t peer_id,
  933. const uint8_t *dest_macaddr,
  934. uint8_t *next_node_mac,
  935. uint32_t flags,
  936. uint8_t type);
  937. int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  938. uint8_t vdev_id,
  939. uint8_t *dest_macaddr,
  940. uint8_t *peer_macaddr,
  941. uint32_t flags);
  942. void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  943. uint8_t vdev_id,
  944. uint8_t *wds_macaddr,
  945. uint8_t type,
  946. uint8_t delete_in_fw);
  947. QDF_STATUS
  948. (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
  949. struct cdp_lro_hash_config *rx_offld_hash);
  950. void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
  951. uint8_t type);
  952. #ifdef FEATURE_NAC_RSSI
  953. uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
  954. uint8_t pdev_id, void *msg);
  955. #else
  956. uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
  957. #endif
  958. int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
  959. uint16_t peer_id, uint16_t hw_peer_id,
  960. uint8_t vdev_id, uint8_t *peer_mac_addr,
  961. enum cdp_txrx_ast_entry_type peer_type,
  962. uint32_t tx_ast_hashidx);
  963. int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
  964. uint16_t peer_id,
  965. uint8_t vdev_id);
  966. int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
  967. enum cdp_cfg_param_type param_num);
  968. void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
  969. uint8_t pdev_id,
  970. struct cdp_rx_mic_err_info *info);
  971. bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
  972. uint8_t vdev_id, uint8_t *peer_mac_addr,
  973. qdf_nbuf_t nbuf,
  974. uint16_t hdr_space);
  975. uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
  976. uint8_t pdev_id, uint16_t freq);
  977. uint8_t (*freq_to_band)(struct cdp_ctrl_objmgr_psoc *psoc,
  978. uint8_t pdev_id, uint16_t freq);
  979. QDF_STATUS(*set_mec_timer)(struct cdp_ctrl_objmgr_psoc *psoc,
  980. uint8_t vdev_id, uint16_t mec_timer_val);
  981. #ifdef ATH_SUPPORT_NAC_RSSI
  982. int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
  983. uint8_t pdev_id,
  984. u_int8_t vdev_id,
  985. enum cdp_nac_param_cmd cmd, char *bssid,
  986. char *client_macaddr, uint8_t chan_num);
  987. int
  988. (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
  989. uint8_t pdev_id, u_int8_t vdev_id,
  990. enum cdp_nac_param_cmd cmd,
  991. char *bssid, char *client_mac);
  992. #endif
  993. int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
  994. uint16_t pdev_id, uint8_t *peer_macaddr);
  995. /**
  996. * send_delba() - Send delba to peer
  997. * @psoc: Objmgr soc handle
  998. * @vdev_id: dp vdev id
  999. * @peer_macaddr: Peer mac addr
  1000. * @tid: Tid number
  1001. * @reason_code: Reason code
  1002. * @cdp_rcode: CDP reason code for sending DELBA
  1003. *
  1004. * Return: 0 for success, non-zero for failure
  1005. */
  1006. int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
  1007. uint8_t *peer_macaddr, uint8_t tid,
  1008. uint8_t reason_code, uint8_t cdp_rcode);
  1009. int
  1010. (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
  1011. uint8_t vdev_id,
  1012. uint8_t *dest_macaddr,
  1013. uint8_t *peer_macaddr,
  1014. uint32_t flags);
  1015. int
  1016. (*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc,
  1017. uint8_t *pdev_id,
  1018. uint8_t *lmac_id,
  1019. uint8_t *target_pdev_id);
  1020. bool (*is_roam_inprogress)(uint32_t vdev_id);
  1021. enum QDF_GLOBAL_MODE (*get_con_mode)(void);
  1022. #ifdef QCA_PEER_MULTIQ_SUPPORT
  1023. int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
  1024. uint16_t peer_id, uint8_t vdev_id,
  1025. uint8_t *peer_mac_addr);
  1026. #endif
  1027. #ifdef DP_MEM_PRE_ALLOC
  1028. void *(*dp_prealloc_get_context)(uint32_t ctxt_type);
  1029. QDF_STATUS(*dp_prealloc_put_context)(uint32_t ctxt_type, void *vaddr);
  1030. void *(*dp_prealloc_get_consistent)(uint32_t *size,
  1031. void **base_vaddr_unaligned,
  1032. qdf_dma_addr_t *paddr_unaligned,
  1033. qdf_dma_addr_t *paddr_aligned,
  1034. uint32_t align,
  1035. uint32_t ring_type);
  1036. void (*dp_prealloc_put_consistent)(qdf_size_t size,
  1037. void *vaddr_unligned,
  1038. qdf_dma_addr_t paddr);
  1039. void (*dp_get_multi_pages)(uint32_t desc_type,
  1040. size_t element_size,
  1041. uint16_t element_num,
  1042. struct qdf_mem_multi_page_t *pages,
  1043. bool cacheable);
  1044. void (*dp_put_multi_pages)(uint32_t desc_type,
  1045. struct qdf_mem_multi_page_t *pages);
  1046. #endif
  1047. int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
  1048. char *(*get_device_name)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  1049. uint8_t pdev_id);
  1050. QDF_STATUS(*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
  1051. uint8_t vdev_id);
  1052. int (*dp_rx_get_pending)(ol_txrx_soc_handle soc);
  1053. void (*dp_rx_sched_refill_thread)(ol_txrx_soc_handle soc);
  1054. /* TODO: Add any other control path calls required to OL_IF/WMA layer */
  1055. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1056. void (*rx_wds_ext_peer_learn)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  1057. uint16_t peer_id, uint8_t vdev_id,
  1058. uint8_t *peer_macaddr);
  1059. #endif /* QCA_SUPPORT_WDS_EXTENDED */
  1060. #ifdef WLAN_SUPPORT_MESH_LATENCY
  1061. QDF_STATUS(*peer_update_mesh_latency_params)(
  1062. struct cdp_ctrl_objmgr_psoc *psoc,
  1063. uint8_t vdev_id, uint8_t *peer_mac, uint8_t tid,
  1064. uint32_t service_interval_dl, uint32_t burst_size_dl,
  1065. uint32_t service_interval_ul, uint32_t burst_size_ul,
  1066. uint8_t add_or_sub, uint8_t ac);
  1067. #endif
  1068. uint32_t (*dp_get_tx_inqueue)(ol_txrx_soc_handle soc);
  1069. };
  1070. #ifdef DP_PEER_EXTENDED_API
  1071. /**
  1072. * struct cdp_misc_ops - mcl ops not classified
  1073. * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
  1074. * @set_wmm_param: set wmm parameters
  1075. * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
  1076. * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
  1077. * @hl_tdls_flag_reset: reset tdls flag for vdev
  1078. * @tx_non_std: Allow the control-path SW to send data frames
  1079. * @get_vdev_id: get vdev id
  1080. * @set_wisa_mode: set wisa mode for a vdev
  1081. * @txrx_data_stall_cb_register: register data stall callback
  1082. * @txrx_data_stall_cb_deregister: deregister data stall callback
  1083. * @txrx_post_data_stall_event: post data stall event
  1084. * @runtime_suspend: ensure TXRX is ready to runtime suspend
  1085. * @runtime_resume: ensure TXRX is ready to runtime resume
  1086. * @get_opmode: get operation mode of vdev
  1087. * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
  1088. *	marking the first packet after wow wakeup
  1089. * @update_mac_id: update mac_id for vdev
  1090. * @flush_rx_frames: flush rx frames on the queue
  1091. * @get_intra_bss_fwd_pkts_count: get the total tx and rx packets that
  1092. *	have been forwarded from the txrx layer
  1093. *	without going to upper layers
  1094. * @pkt_log_init: handler to initialize packet log
  1095. * @pkt_log_con_service: handler to connect packet log service
  1096. * @get_num_rx_contexts: handler to get number of RX contexts
  1097. * @register_packetdump_cb: register callback for different pktlog
  1098. * @unregister_packetdump_cb: unregister callback for different pktlog
  1099. * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
  1100. * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
  1101. *
  1102. * @vdev_inform_ll_conn: inform DP to add/delete a latency critical connection
  1103. * for this particular vdev.
  1104. * @set_swlm_enable: Enable or Disable Software Latency Manager.
  1105. * @is_swlm_enabled: Check if Software latency manager is enabled or not.
  1106. * @display_txrx_hw_info: Dump the DP rings info
  1107. *
  1108. * Function pointers for miscellaneous soc/pdev/vdev related operations.
  1109. */
  1110. struct cdp_misc_ops {
  1111. uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
  1112. uint8_t vdev_id,
  1113. uint16_t timer_value_sec);
  1114. void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1115. struct ol_tx_wmm_param_t wmm_param);
  1116. void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
  1117. uint8_t pdev_id, int enable,
  1118. int period, int txq_limit);
  1119. void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
  1120. uint8_t pdev_id,
  1121. int level, int tput_thresh,
  1122. int tx_limit);
  1123. void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
  1124. uint8_t vdev_id, bool flag);
  1125. qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1126. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
  1127. uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
  1128. uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
  1129. uint8_t vdev_id);
  1130. QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
  1131. uint8_t vdev_id, bool enable);
  1132. QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
  1133. uint8_t pdev_id,
  1134. data_stall_detect_cb cb);
  1135. QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
  1136. uint8_t pdev_id,
  1137. data_stall_detect_cb cb);
  1138. void (*txrx_post_data_stall_event)(
  1139. struct cdp_soc_t *soc_hdl,
  1140. enum data_stall_log_event_indicator indicator,
  1141. enum data_stall_log_event_type data_stall_type,
  1142. uint32_t pdev_id, uint32_t vdev_id_bitmap,
  1143. enum data_stall_log_recovery_type recovery_type);
  1144. QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
  1145. uint8_t pdev_id);
  1146. QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
  1147. uint8_t pdev_id);
  1148. int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1149. void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
  1150. uint8_t pdev_id, uint8_t value);
  1151. void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1152. uint8_t mac_id);
  1153. void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1154. void *peer, bool drop);
  1155. A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
  1156. uint8_t vdev_id,
  1157. uint64_t *fwd_tx_packets,
  1158. uint64_t *fwd_rx_packets);
  1159. void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
  1160. void *scn);
  1161. void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
  1162. uint8_t pdev_id, void *scn);
  1163. void (*pkt_log_exit)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
  1164. int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
  1165. void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1166. ol_txrx_pktdump_cb tx_cb,
  1167. ol_txrx_pktdump_cb rx_cb);
  1168. void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
  1169. uint8_t pdev_id);
  1170. void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
  1171. uint8_t pdev_id);
  1172. void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
  1173. uint8_t vdev_id,
  1174. unsigned long rx_packets,
  1175. uint32_t time_in_ms,
  1176. uint32_t high_th,
  1177. uint32_t low_th);
  1178. void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
  1179. unsigned long tx_bytes,
  1180. uint32_t time_in_ms,
  1181. uint32_t high_th,
  1182. uint32_t low_th);
  1183. void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
  1184. uint8_t pdev_id);
  1185. QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
  1186. uint8_t pdev_id,
  1187. struct cdp_txrx_ext_stats *req);
  1188. QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl,
  1189. uint8_t vdev_id);
  1190. void (*reset_rx_hw_ext_stats)(struct cdp_soc_t *soc_hdl);
  1191. QDF_STATUS (*vdev_inform_ll_conn)(struct cdp_soc_t *soc_hdl,
  1192. uint8_t vdev_id,
  1193. enum vdev_ll_conn_actions action);
  1194. QDF_STATUS (*set_swlm_enable)(struct cdp_soc_t *soc_hdl,
  1195. uint8_t val);
  1196. uint8_t (*is_swlm_enabled)(struct cdp_soc_t *soc_hdl);
  1197. void (*display_txrx_hw_info)(struct cdp_soc_t *soc_hdl);
  1198. uint32_t (*get_tx_rings_grp_bitmap)(struct cdp_soc_t *soc_hdl);
  1199. };
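/*
 * Editorial sketch: a bus layer would bracket runtime PM transitions with
 * the runtime_suspend/runtime_resume hooks above, proceeding only when DP
 * reports it is ready. "misc_ops", "soc" and "pdev_id" are hypothetical
 * locals and the error policy is illustrative only.
 *
 *	if (misc_ops->runtime_suspend(soc, pdev_id) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_AGAIN;
 *	...
 *	misc_ops->runtime_resume(soc, pdev_id);
 */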
  1200. /**
  1201. * struct cdp_ocb_ops - mcl ocb ops
  1202. * @set_ocb_chan_info: set OCB channel info
  1203. * @get_ocb_chan_info: get OCB channel info
  1204. *
  1205. * Function pointers for operations related to OCB.
  1206. */
  1207. struct cdp_ocb_ops {
  1208. void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1209. struct ol_txrx_ocb_set_chan ocb_set_chan);
  1210. struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
  1211. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1212. };
  1213. /**
  1214. * struct cdp_peer_ops - mcl peer related ops
  1215. * @register_peer:
  1216. * @clear_peer:
  1217. * @find_peer_exist:
  1218. * @find_peer_exist_on_vdev:
  1219. * @find_peer_exist_on_other_vdev:
  1220. * @peer_state_update:
  1221. * @get_vdevid:
  1222. * @register_ocb_peer:
  1223. * @peer_get_peer_mac_addr:
  1224. * @get_peer_state:
  1225. * @update_ibss_add_peer_num_of_vdev:
  1226. * @copy_mac_addr_raw:
  1227. * @add_last_real_peer:
  1228. * @is_vdev_restore_last_peer:
  1229. * @update_last_real_peer:
  1230. */
  1231. struct cdp_peer_ops {
  1232. QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1233. struct ol_txrx_desc_type *sta_desc);
  1234. QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1235. struct qdf_mac_addr peer_addr);
  1236. bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1237. uint8_t *peer_addr);
  1238. bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1239. uint8_t *peer_addr);
  1240. bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc,
  1241. uint8_t vdev_id,
  1242. uint8_t *peer_addr,
  1243. uint16_t max_bssid);
  1244. QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc,
  1245. uint8_t *peer_addr,
  1246. enum ol_txrx_peer_state state);
  1247. QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
  1248. uint8_t *vdev_id);
  1249. struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
  1250. struct qdf_mac_addr peer_addr);
  1251. QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
  1252. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  1253. int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1254. uint8_t *peer_mac);
  1255. struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
  1256. int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc,
  1257. uint8_t vdev_id,
  1258. int16_t peer_num_delta);
  1259. void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
  1260. ol_txrx_vdev_peer_remove_cb callback,
  1261. void *callback_context, bool remove_last_peer);
  1262. void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
  1263. ol_txrx_vdev_peer_remove_cb callback,
  1264. void *callback_context);
  1265. void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1266. uint8_t *bss_addr);
  1267. void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1268. uint8_t vdev_id);
  1269. bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc,
  1270. uint8_t vdev_id,
  1271. uint8_t *peer_mac);
  1272. void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1273. uint8_t vdev_id, bool restore_last_peer);
  1274. void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl,
  1275. uint8_t vdev_id, uint8_t *peer_addr);
  1276. void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1277. uint8_t *peer_mac, bool val);
  1278. void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1279. uint8_t *peer_mac, bool val);
  1280. void (*peer_flush_frags)(struct cdp_soc_t *soc_hdl,
  1281. uint8_t vdev_id, uint8_t *peer_mac);
  1282. };
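
/*
 * Example (illustrative sketch, not part of this header): checking for an
 * existing peer before registering a new one through the peer_ops table.
 * The wrapper name and the soc->ops->peer_ops dereference are assumptions
 * made for illustration only.
 *
 *	static inline QDF_STATUS
 *	example_register_sta(struct cdp_soc_t *soc, uint8_t pdev_id,
 *			     struct ol_txrx_desc_type *sta_desc,
 *			     uint8_t *peer_addr)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->peer_ops ||
 *		    !soc->ops->peer_ops->register_peer)
 *			return QDF_STATUS_E_INVAL;
 *
 *		if (soc->ops->peer_ops->find_peer_exist &&
 *		    soc->ops->peer_ops->find_peer_exist(soc, pdev_id,
 *							peer_addr))
 *			return QDF_STATUS_E_FAILURE;
 *
 *		return soc->ops->peer_ops->register_peer(soc, pdev_id,
 *							 sta_desc);
 *	}
 */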

/**
 * struct cdp_mob_stats_ops - mcl mob stats ops
 * @clear_stats: handler to clear ol txrx stats
 * @stats: handler to update ol txrx stats
 */
struct cdp_mob_stats_ops {
	QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id, uint8_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned int buf_len);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: handler to get pn info from peer
 *
 * Function pointers for pmf related operations.
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
			    uint8_t vdev_id, uint8_t **last_pn_valid,
			    uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
#endif

#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: hardcode the configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 *                      1 enabled, 0 disabled.
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 *                                   indicate whether mgmt over wmi is enabled,
 *                                   1 for enabled, 0 for disabled
 * @is_high_latency: get whether the device is a high or low latency device,
 *                   1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled.
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled.
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
					uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
					   uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
					 uint8_t vdev_id, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
					    void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
	void (*set_tx_compl_tsf64)(bool val);
	bool (*get_tx_compl_tsf64)(void);
};
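
/*
 * Example (illustrative sketch, not part of this header): populating a cfg
 * context at attach time and then toggling a flag through cfg_ops. The
 * wrapper name and the soc->ops->cfg_ops dereference are assumptions made
 * for illustration only.
 *
 *	static inline struct cdp_cfg *
 *	example_cfg_attach(struct cdp_soc_t *soc, qdf_device_t osdev,
 *			   void *cfg_param)
 *	{
 *		struct cdp_cfg *cfg_pdev;
 *
 *		if (!soc || !soc->ops || !soc->ops->cfg_ops ||
 *		    !soc->ops->cfg_ops->cfg_attach)
 *			return NULL;
 *
 *		cfg_pdev = soc->ops->cfg_ops->cfg_attach(osdev, cfg_param);
 *		if (cfg_pdev && soc->ops->cfg_ops->set_flow_steering)
 *			soc->ops->cfg_ops->set_flow_steering(cfg_pdev, 1);
 *
 *		return cfg_pdev;
 *	}
 */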

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
 * @register_pause_cb: handler to register tx pause callback
 * @set_desc_global_pool_size: handler to set global pool size
 * @dump_flow_pool_info: handler to dump global and flow pool info
 * @tx_desc_thresh_reached: handler to check whether the tx descriptor
 *                          threshold has been reached for a vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
					    uint8_t pdev_id,
					    uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
					uint8_t pdev_id,
					uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
					tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
	bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id);
};
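
/*
 * Example (illustrative sketch, not part of this header): registering a tx
 * pause callback and mapping a flow pool for a vdev through flowctl_ops.
 * The wrapper name and the soc->ops->flowctl_ops dereference are
 * assumptions made for illustration only.
 *
 *	static inline QDF_STATUS
 *	example_flow_pool_attach(struct cdp_soc_t *soc, uint8_t pdev_id,
 *				 uint8_t vdev_id, tx_pause_callback pause_cb)
 *	{
 *		QDF_STATUS status;
 *
 *		if (!soc || !soc->ops || !soc->ops->flowctl_ops ||
 *		    !soc->ops->flowctl_ops->register_pause_cb ||
 *		    !soc->ops->flowctl_ops->flow_pool_map_handler)
 *			return QDF_STATUS_E_INVAL;
 *
 *		status = soc->ops->flowctl_ops->register_pause_cb(soc,
 *								  pause_cb);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		return soc->ops->flowctl_ops->flow_pool_map_handler(soc,
 *								    pdev_id,
 *								    vdev_id);
 *	}
 */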

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare with watermark
 * @ll_set_tx_pause_q_depth: set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id,
					tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint32_t chan_freq);
	int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id,
					enum netif_action_type action);
#else
	int (*register_tx_flow_control)(
		struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
					     uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				bool tx_resume);
	bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				struct qdf_mac_addr peer_addr,
				unsigned int low_watermark,
				unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
				       int pause_q_depth);
	void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   uint32_t reason, uint32_t pause_type);
	void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			     uint32_t reason, uint32_t pause_type);
};
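
/*
 * Example (illustrative sketch, not part of this header): pausing a vdev
 * around an operation and resuming it afterwards through l_flowctl_ops.
 * The wrapper name, the reason/pause_type values and the
 * soc->ops->l_flowctl_ops dereference are assumptions made for
 * illustration only.
 *
 *	static inline void
 *	example_vdev_pause_unpause(struct cdp_soc_t *soc, uint8_t vdev_id,
 *				   uint32_t reason, uint32_t pause_type)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->l_flowctl_ops)
 *			return;
 *
 *		if (soc->ops->l_flowctl_ops->vdev_pause)
 *			soc->ops->l_flowctl_ops->vdev_pause(soc, vdev_id,
 *							    reason,
 *							    pause_type);
 *
 *		(do work while the vdev tx queues are paused)
 *
 *		if (soc->ops->l_flowctl_ops->vdev_unpause)
 *			soc->ops->l_flowctl_ops->vdev_unpause(soc, vdev_id,
 *							      reason,
 *							      pause_type);
 *	}
 */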

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, int period,
				     uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, int level);
};
#endif

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: get IPA uC resource info
 * @ipa_set_doorbell_paddr: set IPA uC doorbell register physical address
 * @ipa_set_active: set IPA uC data path active state
 * @ipa_op_response: handle an IPA uC op response message
 * @ipa_register_op_cb: register a callback for IPA uC op messages
 * @ipa_get_stat: request IPA uC statistics
 * @ipa_tx_data_frame: send a data frame on the IPA tx path
 * @ipa_tx_buf_smmu_mapping: Create SMMU mappings for Tx buffers to IPA
 * @ipa_tx_buf_smmu_unmapping: Release SMMU mappings for Tx buffers to IPA
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
				       uint8_t pdev_id);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
					     uint8_t pdev_id);
	QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				     bool uc_active, bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id,
					 void (*ipa_uc_op_cb_type)
						(uint8_t *op_msg, void *osif_ctxt),
					 void *usr_ctxt);
	void (*ipa_deregister_op_cb)(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id);
	QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
					     uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
					     uint8_t pdev_id,
					     uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
				       uint8_t pdev_id, uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
					  uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				void *ipa_i2w_cb, void *ipa_w2i_cb,
				void *ipa_wdi_meter_notifier_cb,
				uint32_t ipa_desc_size, void *ipa_priv,
				bool is_rm_enabled, uint32_t *tx_pipe_handle,
				uint32_t *rx_pipe_handle, bool is_smmu_enabled,
				qdf_ipa_sys_connect_params_t *sys_in,
				bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				void *ipa_i2w_cb, void *ipa_w2i_cb,
				void *ipa_wdi_meter_notifier_cb,
				uint32_t ipa_desc_size, void *ipa_priv,
				bool is_rm_enabled, uint32_t *tx_pipe_handle,
				uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				  uint32_t tx_pipe_handle,
				  uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
				      qdf_ipa_client_type_t prod_client,
				      qdf_ipa_client_type_t cons_client,
				      uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
				       uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id);
	QDF_STATUS (*ipa_set_perf_level)(int client,
					 uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				    qdf_nbuf_t nbuf, bool *fwd_success);
	QDF_STATUS (*ipa_tx_buf_smmu_mapping)(struct cdp_soc_t *soc_hdl,
					      uint8_t pdev_id);
	QDF_STATUS (*ipa_tx_buf_smmu_unmapping)(struct cdp_soc_t *soc_hdl,
						uint8_t pdev_id);
};
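
/*
 * Example (illustrative sketch, not part of this header): bringing up the
 * IPA uC path by fetching resource info and then enabling the pipes through
 * ipa_ops. The wrapper name and the soc->ops->ipa_ops dereference are
 * assumptions made for illustration only; production callers typically use
 * thin inline wrapper helpers around this table.
 *
 *	static inline QDF_STATUS
 *	example_ipa_bringup(struct cdp_soc_t *soc, uint8_t pdev_id)
 *	{
 *		QDF_STATUS status;
 *
 *		if (!soc || !soc->ops || !soc->ops->ipa_ops ||
 *		    !soc->ops->ipa_ops->ipa_get_resource ||
 *		    !soc->ops->ipa_ops->ipa_enable_pipes)
 *			return QDF_STATUS_E_INVAL;
 *
 *		status = soc->ops->ipa_ops->ipa_get_resource(soc, pdev_id);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		return soc->ops->ipa_ops->ipa_enable_pipes(soc, pdev_id);
 *	}
 */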
#endif

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointer for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 uint32_t *queue_delay_microsec,
			 uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				uint16_t *out_packet_count,
				uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, uint32_t interval);
};

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id);
};
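
/*
 * Example (illustrative sketch, not part of this header): forwarding a bus
 * suspend request through bus_ops. The wrapper name and the
 * soc->ops->bus_ops dereference are assumptions made for illustration only.
 *
 *	static inline QDF_STATUS
 *	example_bus_suspend(struct cdp_soc_t *soc, uint8_t pdev_id)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->bus_ops ||
 *		    !soc->ops->bus_ops->bus_suspend)
 *			return QDF_STATUS_E_INVAL;
 *
 *		return soc->ops->bus_ops->bus_suspend(soc, pdev_id);
 *	}
 */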
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb: register a callback used to flush offloaded
 *                              rx frames
 * @deregister_rx_offld_flush_cb: deregister the rx offload flush callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: Handler to configure host rx monitor status ring
 * @txrx_get_cfr_rcc: Handler to get CFR mode
 * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode
 * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode
 * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode
 * @txrx_enable_mon_reap_timer: Enable/disable the reap timer of the monitor
 *                              status ring
 */
struct cdp_cfr_ops {
	void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
				uint8_t pdev_id,
				bool enable,
				struct cdp_monitor_filter *filter_val);
	bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id);
	void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl,
				 uint8_t pdev_id,
				 bool enable);
	void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
				       uint8_t pdev_id,
				       struct cdp_cfr_rcc_stats *buf);
	void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id);
	void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id,
					   bool enable);
};
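
/*
 * Example (illustrative sketch, not part of this header): toggling CFR RCC
 * mode and fetching its debug statistics through cfr_ops. The wrapper name
 * and the soc->ops->cfr_ops dereference are assumptions made for
 * illustration only.
 *
 *	static inline void
 *	example_cfr_toggle(struct cdp_soc_t *soc, uint8_t pdev_id, bool enable,
 *			   struct cdp_cfr_rcc_stats *buf)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->cfr_ops)
 *			return;
 *
 *		if (soc->ops->cfr_ops->txrx_set_cfr_rcc)
 *			soc->ops->cfr_ops->txrx_set_cfr_rcc(soc, pdev_id,
 *							    enable);
 *		if (buf && soc->ops->cfr_ops->txrx_get_cfr_dbg_stats)
 *			soc->ops->cfr_ops->txrx_get_cfr_dbg_stats(soc, pdev_id,
 *								  buf);
 *	}
 */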
#endif

#ifdef WLAN_SUPPORT_MSCS
/**
 * struct cdp_mscs_ops - data path ops for MSCS
 * @mscs_peer_lookup_n_get_priority: look up the MSCS peer for an nbuf and
 *                                   derive its priority
 */
struct cdp_mscs_ops {
	int (*mscs_peer_lookup_n_get_priority)(struct cdp_soc_t *soc,
					       uint8_t *src_mac,
					       uint8_t *dst_mac,
					       qdf_nbuf_t nbuf);
};
#endif

#ifdef WLAN_SUPPORT_MESH_LATENCY
/**
 * struct cdp_mesh_latency_ops - data path ops for Mesh latency
 * @mesh_latency_update_peer_parameter: update mesh latency parameters
 *                                      (service interval, burst size,
 *                                      priority) for a peer
 */
struct cdp_mesh_latency_ops {
	QDF_STATUS (*mesh_latency_update_peer_parameter)(
			struct cdp_soc_t *soc,
			uint8_t *dest_mac, uint32_t service_interval_dl,
			uint32_t burst_size_dl, uint32_t service_interval_ul,
			uint32_t burst_size_ul, uint16_t priority,
			uint8_t add_or_sub);
};
#endif

struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
	struct cdp_misc_ops *misc_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
	struct cdp_bus_ops *bus_ops;
	struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	struct cdp_cfr_ops *cfr_ops;
#endif
#ifdef WLAN_SUPPORT_MSCS
	struct cdp_mscs_ops *mscs_ops;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
	struct cdp_mesh_latency_ops *mesh_latency_ops;
#endif
};
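
/*
 * Example (illustrative sketch, not part of this header): the converged
 * wrappers dispatch through this table, so every call site guards against
 * ops blocks that may be left NULL or compiled out for a given target. The
 * helper below is an illustrative assumption, not an API defined here.
 *
 *	static inline void
 *	example_throttle_set_level(struct cdp_soc_t *soc, uint8_t pdev_id,
 *				   int level)
 *	{
 *		if (!soc || !soc->ops)
 *			return;
 *	#ifdef DP_FLOW_CTL
 *		if (soc->ops->throttle_ops &&
 *		    soc->ops->throttle_ops->throttle_set_level)
 *			soc->ops->throttle_ops->throttle_set_level(soc,
 *								   pdev_id,
 *								   level);
 *	#endif
 *	}
 */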
#endif