
/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#include <wmi_unified_api.h>
#include <wdi_event_api.h>

#ifdef IPA_OFFLOAD
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
    defined(CONFIG_IPA_WDI_UNIFIED_API)
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL             0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER      1
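
/*
 * Illustrative note (not part of the original header): the "bitmap" argument
 * of the txrx_peer_delete/txrx_peer_delete_sync ops declared below is built
 * from the bit positions above. Under that assumption, a caller requesting
 * default handling would pass something like:
 *
 *     uint32_t bitmap = 1 << CDP_PEER_DELETE_NO_SPECIAL;
 *
 * while (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER) asks the DP layer to skip
 * starting the peer unmap timer during delete.
 */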
struct hif_opaque_softc;

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
    /* IEEE80211_NAC_PARAM_ADD */
    CDP_NAC_PARAM_ADD = 1,
    /* IEEE80211_NAC_PARAM_DEL */
    CDP_NAC_PARAM_DEL,
    /* IEEE80211_NAC_PARAM_LIST */
    CDP_NAC_PARAM_LIST,
};

#define CDP_DELBA_INTERVAL_MS 3000

/**
 * enum cdp_delba_rcode - CDP reason code for sending DELBA
 * @CDP_DELBA_REASON_NONE: None
 * @CDP_DELBA_2K_JUMP: Sending DELBA from 2k_jump_handle
 */
enum cdp_delba_rcode {
    CDP_DELBA_REASON_NONE = 0,
    CDP_DELBA_2K_JUMP,
};

/**
 * enum vdev_peer_protocol_enter_exit - whether ingress or egress
 * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress
 * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress
 *
 * whether ingress or egress
 */
enum vdev_peer_protocol_enter_exit {
    CDP_VDEV_PEER_PROTOCOL_IS_INGRESS,
    CDP_VDEV_PEER_PROTOCOL_IS_EGRESS
};

/**
 * enum vdev_peer_protocol_tx_rx - whether tx or rx
 * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx
 * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx
 *
 * whether tx or rx
 */
enum vdev_peer_protocol_tx_rx {
    CDP_VDEV_PEER_PROTOCOL_IS_TX,
    CDP_VDEV_PEER_PROTOCOL_IS_RX
};

/**
 * enum vdev_ll_conn_actions - Actions to inform vdev about
 * low latency connection.
 * @CDP_VDEV_LL_CONN_ADD: Add low latency connection
 * @CDP_VDEV_LL_CONN_DEL: Delete low latency connection
 */
enum vdev_ll_conn_actions {
    CDP_VDEV_LL_CONN_ADD,
    CDP_VDEV_LL_CONN_DEL
};

/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
    QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
    int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id);
    QDF_STATUS (*txrx_vdev_attach)
        (struct cdp_soc_t *soc, uint8_t pdev_id,
        struct cdp_vdev_info *vdev_info);
    QDF_STATUS
    (*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
        ol_txrx_vdev_delete_cb callback,
        void *cb_context);
    QDF_STATUS (*txrx_pdev_attach)
        (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
        qdf_device_t osdev, uint8_t pdev_id);
    int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
    void
    (*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
        int force);
    QDF_STATUS
    (*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
        int force);

    /**
     * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
     * @soc: soc dp handle
     * @pdev_id: id of Dp pdev handle
     * @force: Force deinit or not
     *
     * Return: QDF_STATUS
     */
    QDF_STATUS
    (*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
        int force);
    QDF_STATUS
    (*txrx_peer_create)
        (ol_txrx_soc_handle soc, uint8_t vdev_id,
        uint8_t *peer_mac_addr, enum cdp_peer_type peer_type);
    QDF_STATUS
    (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
        uint8_t *peer_mac,
        struct cdp_peer_setup_info *setup_info);
    QDF_STATUS
    (*txrx_cp_peer_del_response)
        (ol_txrx_soc_handle soc, uint8_t vdev_id,
        uint8_t *peer_mac_addr);
    QDF_STATUS
    (*txrx_peer_teardown)
        (struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
    int (*txrx_peer_add_ast)
        (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
        uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
        uint32_t flags);
    int (*txrx_peer_update_ast)
        (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
        uint8_t *mac_addr, uint32_t flags);
    bool (*txrx_peer_get_ast_info_by_soc)
        (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
        struct cdp_ast_entry_info *ast_entry_info);
    bool (*txrx_peer_get_ast_info_by_pdev)
        (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
        uint8_t pdev_id,
        struct cdp_ast_entry_info *ast_entry_info);
    QDF_STATUS (*txrx_peer_ast_delete_by_soc)
        (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
        txrx_ast_free_cb callback,
        void *cookie);
    QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
        (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
        uint8_t pdev_id,
        txrx_ast_free_cb callback,
        void *cookie);
    QDF_STATUS
    (*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint8_t *peer_mac, uint32_t bitmap);
    QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint8_t smart_monitor);
    void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint8_t *peer_mac,
        QDF_STATUS (*delete_cb)(
            uint8_t vdev_id,
            uint32_t peerid_cnt,
            uint16_t *peerid_list),
        uint32_t bitmap);
    void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id,
        ol_txrx_peer_unmap_sync_cb peer_unmap_sync);
    QDF_STATUS
    (*txrx_get_peer_mac_from_peer_id)
        (struct cdp_soc_t *cdp_soc,
        uint32_t peer_id, uint8_t *peer_mac);
    void
    (*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
    void
    (*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
        struct cdp_dev_stats *stats, uint8_t type);
    QDF_STATUS
    (*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
        u_int8_t *mem_status,
        u_int8_t *user_position);
    uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
        uint8_t pdev_id);
    QDF_STATUS
    (*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
        int force);
    QDF_STATUS
    (*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
        uint32_t chan_mhz);
    QDF_STATUS
    (*txrx_set_privacy_filters)
        (struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
        uint32_t num);
    uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);

    /********************************************************************
     * Data Interface (B Interface)
     ********************************************************************/
    QDF_STATUS
    (*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
        ol_osif_vdev_handle osif_vdev,
        struct ol_txrx_ops *txrx_ops);
    int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
        qdf_nbuf_t tx_mgmt_frm, uint8_t type);
    int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
        qdf_nbuf_t tx_mgmt_frm, uint8_t type,
        uint8_t use_6mbps, uint16_t chanfreq);

    /**
     * ol_txrx_mgmt_tx_cb - tx management delivery notification
     * callback function
     */
    QDF_STATUS
    (*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
        uint8_t type,
        ol_txrx_mgmt_tx_cb download_cb,
        ol_txrx_mgmt_tx_cb ota_ack_cb,
        void *ctxt);

    /**
     * ol_txrx_data_tx_cb - Function registered with the data path
     * that is called when tx frames marked as "no free" are
     * done being transmitted
     */
    void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
        ol_txrx_data_tx_cb callback, void *ctxt);
    qdf_nbuf_t (*tx_send_exc)
        (ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list,
        struct cdp_tx_exception_metadata *tx_exc_metadata);

    /*******************************************************************
     * Statistics and Debugging Interface (C Interface)
     *******************************************************************/
    int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
        int max_subfrms_ampdu,
        int max_subfrms_amsdu);
    A_STATUS
    (*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
        struct ol_txrx_stats_req *req,
        bool per_vdev, bool response_expected);
    int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
        int debug_specs);
    QDF_STATUS
    (*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint8_t cfg_stats_type, uint32_t cfg_val);
    void (*txrx_print_level_set)(unsigned level);

    /**
     * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
     * @soc: datapath soc handle
     * @vdev_id: vdev id
     *
     * Return: vdev mac address
     */
    uint8_t *(*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
        uint8_t vdev_id);

    /**
     * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
     * @soc: datapath soc handle
     * @vdev_id: vdev id
     *
     * Return: Handle to control pdev
     */
    struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
        uint8_t vdev_id);

    /**
     * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
     * @soc: datapath soc handle
     * @pdev_id: pdev id
     *
     * Return: vdev_id
     */
    uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
        uint8_t pdev_id);
    void (*txrx_soc_detach)(struct cdp_soc_t *soc);

    /**
     * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
     * @soc: Opaque DP handle
     *
     * Return: None
     */
    void (*txrx_soc_deinit)(struct cdp_soc_t *soc);

    /**
     * txrx_soc_init() - Initialize dp soc and dp ring memory
     * @soc: Opaque DP handle
     * @ctrl_psoc: Opaque CP handle
     * @htchdl: Opaque htc handle
     * @hifhdl: Opaque hif handle
     *
     * Return: None
     */
    void *(*txrx_soc_init)(struct cdp_soc_t *soc,
        struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
        struct hif_opaque_softc *hif_handle,
        HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
        struct ol_if_ops *ol_ops, uint16_t device_id);
    QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
        HTC_HANDLE htc_handle,
        qdf_device_t qdf_osdev,
        uint8_t pdev_id);

    /**
     * txrx_tso_soc_attach() - TSO attach handler triggered during
     * dynamic tso activation
     * @soc: Opaque DP handle
     *
     * Return: QDF status
     */
    QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);

    /**
     * txrx_tso_soc_detach() - TSO detach handler triggered during
     * dynamic tso de-activation
     * @soc: Opaque DP handle
     *
     * Return: QDF status
     */
    QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
    int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
        uint8_t *peer_mac,
        uint16_t vdev_id, uint8_t tid,
        int status);
    int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
        uint8_t *peer_mac,
        uint16_t vdev_id,
        uint8_t dialogtoken,
        uint16_t tid, uint16_t batimeout,
        uint16_t buffersize,
        uint16_t startseqnum);
    QDF_STATUS
    (*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
        uint8_t *peer_mac,
        uint16_t vdev_id, uint8_t tid,
        uint8_t *dialogtoken, uint16_t *statuscode,
        uint16_t *buffersize, uint16_t *batimeout);
    int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
        uint16_t vdev_id, int tid, uint16_t reasoncode);

    /**
     * delba_tx_completion() - Indicate delba tx status
     * @cdp_soc: soc handle
     * @peer_mac: Peer mac address
     * @vdev_id: vdev id
     * @tid: Tid number
     * @status: Tx completion status
     *
     * Return: 0 on Success, 1 on failure
     */
    int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
        uint16_t vdev_id,
        uint8_t tid, int status);
    QDF_STATUS
    (*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
        uint16_t vdev_id, uint8_t tid,
        uint16_t statuscode);
    QDF_STATUS
    (*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
        uint8_t vdev_id, uint8_t map_id);
    int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
    void (*flush_cache_rx_queue)(void);
    QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
        uint8_t pdev_id,
        uint8_t map_id,
        uint8_t tos, uint8_t tid);
    QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
        uint8_t vdev_id,
        struct cdp_txrx_stats_req *req);
    QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
        enum qdf_stats_verbosity_level level);
    QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
    void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
    QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
        uint8_t vdev_id, uint8_t *peermac,
        enum cdp_sec_type sec_type,
        uint32_t *rx_pn);
    QDF_STATUS (*set_key_sec_type)(struct cdp_soc_t *soc_handle,
        uint8_t vdev_id, uint8_t *peermac,
        enum cdp_sec_type sec_type,
        bool is_unicast);
    QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
        struct cdp_config_params *params);
    void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
    void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
        void *dp_hdl);
    void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
        uint8_t vdev_id);
    QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint16_t size);
    void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
    void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
        void *dp_txrx_handle);
    QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
        uint32_t lmac_id);
    QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc,
        uint8_t pdev_id, uint32_t lmac_id);
    QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
        uint8_t pdev_id, bool is_pdev_down);
    QDF_STATUS (*txrx_peer_reset_ast)
        (ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
        uint8_t *peer_macaddr, uint8_t vdev_id);
    QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
        uint8_t vdev_id);
    void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
    void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
        uint8_t ac, uint32_t value);
    void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
        uint8_t ac, uint32_t *value);
    QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
        uint32_t num_peers,
        uint32_t max_ast_index,
        uint8_t peer_map_unmap_v);
    QDF_STATUS (*set_soc_param)(ol_txrx_soc_handle soc,
        enum cdp_soc_param_t param,
        uint32_t value);
    ol_txrx_tx_fp tx_send;

    /**
     * txrx_get_os_rx_handles_from_vdev() - Return the stack delivery
     * function and osif vdev used to deliver RX packets to the stack
     * @soc: datapath soc handle
     * @vdev_id: vdev id
     * @stack_fn: out pointer to the function used to deliver RX packets
     * to the stack
     * @osif_vdev: out pointer to the osif vdev to deliver RX packets to
     */
    void (*txrx_get_os_rx_handles_from_vdev)
        (ol_txrx_soc_handle soc,
        uint8_t vdev_id,
        ol_txrx_rx_fp *stack_fn,
        ol_osif_vdev_handle *osif_vdev);
    void (*set_rate_stats_ctx)(struct cdp_soc_t *soc,
        void *ctx);
    int (*txrx_classify_update)
        (struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
        enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
    bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
        enum cdp_capabilities dp_caps);
    void *(*get_rate_stats_ctx)(struct cdp_soc_t *soc);
    QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
        uint8_t pdev_id,
        void *buf);
    void *(*txrx_peer_get_rdkstats_ctx)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint8_t *mac_addr);
    QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
        uint8_t pdev_id);
    QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
        uint8_t pdev_id,
        uint8_t pcp, uint8_t tid);
    QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint8_t pcp, uint8_t tid);
#ifdef QCA_MULTIPASS_SUPPORT
    QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint16_t vlan_id, uint16_t group_key);
#endif
    uint16_t (*get_peer_mac_list)
        (ol_txrx_soc_handle soc, uint8_t vdev_id,
        u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt,
        bool limit);
#ifdef QCA_SUPPORT_WDS_EXTENDED
    uint16_t (*get_wds_ext_peer_id)(ol_txrx_soc_handle soc,
        uint8_t vdev_id,
        uint8_t *mac);
    QDF_STATUS (*set_wds_ext_peer_rx)(ol_txrx_soc_handle soc,
        uint8_t vdev_id,
        uint8_t *mac,
        ol_txrx_rx_fp rx,
        ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
    void (*txrx_drain)(ol_txrx_soc_handle soc);
    int (*get_free_desc_poolsize)(struct cdp_soc_t *soc);
#ifdef WLAN_SYSFS_DP_STATS
    QDF_STATUS (*txrx_sysfs_fill_stats)(ol_txrx_soc_handle soc,
        char *buf, uint32_t buf_size);
    QDF_STATUS (*txrx_sysfs_set_stat_type)(ol_txrx_soc_handle soc,
        uint32_t stat_type,
        uint32_t mac_id);
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
    void (*set_pkt_capture_mode)(struct cdp_soc_t *soc, bool val);
#endif
};
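
/*
 * Illustrative sketch (not part of the original header): control-path code
 * normally reaches the ops above through the cdp_ops dispatch table hanging
 * off the soc handle, typically via the cdp_* inline wrappers in
 * cdp_txrx_cmn.h rather than by dereferencing the table directly. The
 * soc->ops->cmn_drv_ops path below reflects that convention and is an
 * assumption here, as is the error handling:
 *
 *     struct cdp_vdev_info vdev_info;
 *
 *     (populate vdev_info with the vdev mac address, vdev id and opmode)
 *
 *     if (!soc->ops->cmn_drv_ops->txrx_vdev_attach ||
 *         soc->ops->cmn_drv_ops->txrx_vdev_attach(soc, pdev_id,
 *                                                 &vdev_info) !=
 *         QDF_STATUS_SUCCESS)
 *             return QDF_STATUS_E_FAILURE;
 */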
struct cdp_ctrl_ops {
    int
    (*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc);
    int
    (*txrx_update_filter_neighbour_peers)(
        struct cdp_soc_t *soc, uint8_t vdev_id,
        uint32_t cmd, uint8_t *macaddr);

    /* Is this similar to ol_txrx_peer_state_update() in MCL */
    /**
     * @brief Update the authorize peer object at association time
     * @details
     * For the host-based implementation of rate-control, it
     * updates the peer/node-related parameters within rate-control
     * context of the peer at association.
     *
     * @param soc_hdl - pointer to the soc object
     * @param vdev_id - id of the virtual device object
     * @param peer_mac - mac address of the node's object
     * @param authorize - either to authorize or unauthorize peer
     *
     * @return QDF_STATUS
     */
    QDF_STATUS
    (*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id,
        uint8_t *peer_mac,
        u_int32_t authorize);
    bool
    (*txrx_peer_get_authorize)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id,
        uint8_t *peer_mac);
    void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id);
    int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl);
    QDF_STATUS
    (*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id,
        enum cdp_vdev_param_type param,
        cdp_config_param_type val);

    /**
     * @brief Set the reo dest ring num of the radio
     * @details
     * Set the reo destination ring no on which we will receive
     * pkts for this radio.
     *
     * @param txrx_soc - soc handle
     * @param pdev_id - id of physical device
     * @param reo_dest_ring_num - value ranges between 1 - 4
     * @return QDF_STATUS
     */
    QDF_STATUS (*txrx_set_pdev_reo_dest)(
        struct cdp_soc_t *txrx_soc,
        uint8_t pdev_id,
        enum cdp_host_reo_dest_ring reo_dest_ring_num);

    /**
     * @brief Get the reo dest ring num of the radio
     * @details
     * Get the reo destination ring no on which we will receive
     * pkts for this radio.
     *
     * @param txrx_soc - soc handle
     * @param pdev_id - id of physical device
     * @return the reo destination ring number
     */
    enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
        struct cdp_soc_t *txrx_soc,
        uint8_t pdev_id);
    int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id,
        wdi_event_subscribe *event_cb_sub,
        uint32_t event);
    int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id,
        wdi_event_subscribe *event_cb_sub,
        uint32_t event);
    int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id,
        uint8_t *peer_mac, uint8_t sec_idx);
    QDF_STATUS
    (*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint8_t subtype, uint8_t tx_power);

    /**
     * txrx_set_pdev_param() - callback to set pdev parameter
     * @soc: opaque soc handle
     * @pdev_id: id of data path pdev handle
     * @val: value of pdev_tx_capture
     *
     * Return: status: 0 - Success, non-zero: Failure
     */
    QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc,
        uint8_t pdev_id,
        enum cdp_pdev_param_type type,
        cdp_config_param_type val);
    QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc,
        uint8_t pdev_id,
        enum cdp_pdev_param_type type,
        cdp_config_param_type *val);
    QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc,
        uint8_t vdev_id, uint8_t *peer_mac,
        enum cdp_peer_param_type param,
        cdp_config_param_type val);
    QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc,
        uint8_t vdev_id, uint8_t *peer_mac,
        enum cdp_peer_param_type param,
        cdp_config_param_type *val);
    void *(*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id);
    void (*txrx_peer_flush_frags)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint8_t *peer_mac);
#ifdef VDEV_PEER_PROTOCOL_COUNT
    void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc,
        int8_t vdev_id,
        qdf_nbuf_t nbuf,
        bool is_egress,
        bool is_rx);
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
    QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc,
        uint8_t vdev_id,
        enum cdp_nac_param_cmd cmd,
        char *bssid,
        char *client_macaddr,
        uint8_t chan_num);
    QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc,
        uint8_t vdev_id,
        char *macaddr,
        uint8_t *rssi);
#endif
#ifdef WLAN_SUPPORT_SCS
    QDF_STATUS
    (*txrx_enable_scs_params)(struct cdp_soc_t *soc,
        struct qdf_mac_addr *macaddr,
        uint8_t vdev_id,
        bool is_active);
    QDF_STATUS
    (*txrx_record_scs_params)(struct cdp_soc_t *soc,
        struct qdf_mac_addr *macaddr,
        uint8_t vdev_id,
        struct cdp_scs_params *scs_params,
        uint8_t entry_ctr,
        uint8_t scs_sessions);
#endif
#ifdef WLAN_SUPPORT_MSCS
    QDF_STATUS
    (*txrx_record_mscs_params)(struct cdp_soc_t *soc, uint8_t *macaddr,
        uint8_t vdev_id,
        struct cdp_mscs_params *mscs_params,
        bool active);
#endif
    QDF_STATUS
    (*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac,
        bool is_unicast, uint32_t *key);
    QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        enum cdp_vdev_param_type param,
        cdp_config_param_type *val);
    int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc,
        uint8_t pdev_id,
        uint8_t *macaddr, uint8_t enb_dsb);
    QDF_STATUS
    (*calculate_delay_stats)(struct cdp_soc_t *cdp_soc,
        uint8_t vdev_id, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
    QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
        struct cdp_soc_t *soc, uint8_t pdev_id,
        uint32_t protocol_mask, uint16_t protocol_type,
        uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
    void (*txrx_dump_pdev_rx_protocol_tag_stats)(
        struct cdp_soc_t *soc, uint8_t pdev_id,
        uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
    QDF_STATUS (*txrx_set_rx_flow_tag)(
        struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
        struct cdp_rx_flow_info *flow_info);
    QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
        struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
        struct cdp_rx_flow_info *flow_info);
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
    void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc,
        uint8_t vdev_id, uint8_t *peer_mac,
        uint16_t vlan_id);
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
    QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
        ol_txrx_soc_handle soc, uint8_t pdev_id,
        bool is_rx_pkt_cap_enable, uint8_t is_tx_pkt_cap_enable,
        uint8_t *peer_mac);
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
    QDF_STATUS
    (*txrx_set_psoc_param)(struct cdp_soc_t *soc,
        enum cdp_psoc_param_type param,
        cdp_config_param_type val);
    QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc,
        enum cdp_psoc_param_type type,
        cdp_config_param_type *val);
#ifdef VDEV_PEER_PROTOCOL_COUNT
    /*
     * Enable per-peer protocol counters
     */
    void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc,
        int8_t vdev_id, bool enable);
    void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
        int8_t vdev_id, int mask);
    int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc,
        int8_t vdev_id);
    int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
        int8_t vdev_id);
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
    void (*txrx_set_delta_tsf)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint32_t delta_tsf);
    QDF_STATUS (*txrx_set_tsf_ul_delay_report)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        bool enable);
    QDF_STATUS (*txrx_get_uplink_delay)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint32_t *val);
#endif
};
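
/*
 * Illustrative sketch (not part of the original header): authorizing a peer
 * after key install goes through the ctrl ops table. The soc->ops->ctrl_ops
 * dispatch path is an assumption based on the cdp_* wrapper convention
 * (see cdp_peer_authorize() in cdp_txrx_ctrl.h):
 *
 *     if (soc->ops->ctrl_ops->txrx_peer_authorize)
 *         soc->ops->ctrl_ops->txrx_peer_authorize(soc, vdev_id,
 *                                                 peer_mac, 1);
 */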
struct cdp_me_ops {
    void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc,
        uint8_t pdev_id);
    void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id);
    uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id,
        qdf_nbuf_t wbuf, u_int8_t newmac[][6],
        uint8_t newmaccnt, uint8_t tid,
        bool is_igmp);
};

struct cdp_mon_ops {
    QDF_STATUS (*txrx_reset_monitor_mode)
        (ol_txrx_soc_handle soc, uint8_t pdev_id, u_int8_t smart_monitor);
    QDF_STATUS (*txrx_deliver_tx_mgmt)
        (struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf);
    /* HK advance monitor filter support */
    QDF_STATUS (*txrx_set_advance_monitor_filter)
        (struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
        struct cdp_monitor_filter *filter_val);
    /* Configure full monitor mode */
    QDF_STATUS
    (*config_full_mon_mode)(struct cdp_soc_t *soc, uint8_t val);
    QDF_STATUS (*soc_config_full_mon_mode)(struct cdp_pdev *cdp_pdev,
        uint8_t val);
};
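
/*
 * Illustrative sketch (not part of the original header): disabling monitor
 * rx on a pdev goes through txrx_reset_monitor_mode above, typically after
 * txrx_set_monitor_mode (struct cdp_cmn_ops) was used to enable it. The
 * soc->ops->mon_ops dispatch path is an assumption based on the cdp_*
 * wrapper convention:
 *
 *     if (soc->ops->mon_ops->txrx_reset_monitor_mode)
 *         soc->ops->mon_ops->txrx_reset_monitor_mode(soc, pdev_id, 0);
 */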
struct cdp_host_stats_ops {
    int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
        struct ol_txrx_stats_req *req);
    QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
        uint8_t vdev_id);
    QDF_STATUS
    (*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
        struct cdp_stats_extd *buf);

    /**
     * @brief Enable enhanced stats functionality.
     *
     * @param soc - the soc handle
     * @param pdev_id - pdev_id of pdev
     * @return - QDF_STATUS
     */
    QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
        uint8_t pdev_id);

    /**
     * @brief Disable enhanced stats functionality.
     *
     * @param soc - the soc handle
     * @param pdev_id - pdev_id of pdev
     * @return - QDF_STATUS
     */
    QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
        uint8_t pdev_id);
    QDF_STATUS
    (*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
    QDF_STATUS
    (*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
    int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
        struct ol_txrx_stats_req *req);
    int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
        uint8_t pdev_id,
        uint8_t *addr, void *stats,
        uint32_t last_tx_rate_mcs,
        uint32_t stats_id);
    QDF_STATUS
    (*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
        uint8_t *addr,
        uint32_t cap, uint32_t copy_stats);
    QDF_STATUS
    (*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
        void *data,
        uint32_t data_len);
    QDF_STATUS
    (*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
        uint8_t pdev_id, void *data,
        uint16_t stats_id);
    QDF_STATUS
    (*txrx_get_peer_stats_param)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint8_t *peer_mac,
        enum cdp_peer_stats_type type,
        cdp_peer_stats_param_t *buf);
    QDF_STATUS
    (*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint8_t *peer_mac,
        struct cdp_peer_stats *peer_stats);
    QDF_STATUS
    (*txrx_get_soc_stats)(struct cdp_soc_t *soc,
        struct cdp_soc_stats *soc_stats);
    QDF_STATUS
    (*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        uint8_t *peer_mac);
    QDF_STATUS
    (*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
        uint8_t vdev_id, uint8_t *peer_mac);
    int
    (*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
        void *buf, bool is_aggregate);
    int
    (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
        void *data, uint32_t len,
        uint32_t stats_id);
    int
    (*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
        uint8_t vdev_id,
        wmi_host_vdev_extd_stats *buffer);
    QDF_STATUS
    (*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
        uint8_t vdev_id, void *buf,
        uint16_t stats_id);
    int
    (*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
        void *buf);
    QDF_STATUS
    (*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
        struct cdp_pdev_stats *buf);
    int
    (*txrx_get_ratekbps)(int preamb, int mcs,
        int htflag, int gintval);
    QDF_STATUS
    (*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint8_t *peer_mac, void *stats,
        uint32_t last_tx_rate_mcs,
        uint32_t stats_id);
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
    QDF_STATUS
    (*txrx_get_scan_spcl_vap_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
        struct cdp_scan_spcl_vap_stats *stats);
#endif
};
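
/*
 * Illustrative sketch (not part of the original header): fetching per-peer
 * statistics through the host stats ops. The caller owns the cdp_peer_stats
 * buffer; the soc->ops->host_stats_ops dispatch path is an assumption based
 * on the cdp_* wrapper convention:
 *
 *     struct cdp_peer_stats *stats = qdf_mem_malloc(sizeof(*stats));
 *
 *     if (stats && soc->ops->host_stats_ops->txrx_get_peer_stats)
 *         soc->ops->host_stats_ops->txrx_get_peer_stats(soc, vdev_id,
 *                                                       peer_mac, stats);
 *     qdf_mem_free(stats);
 */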
struct cdp_wds_ops {
    QDF_STATUS
    (*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
        u_int32_t val);
    QDF_STATUS
    (*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
        uint8_t vdev_id, uint8_t *peer_mac,
        int wds_tx_ucast, int wds_tx_mcast);
    int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
        uint32_t val);
};

struct cdp_raw_ops {
    QDF_STATUS
    (*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
        qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
};

#ifdef PEER_FLOW_CONTROL
struct cdp_pflow_ops {
    uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
        uint8_t pdev_id,
        enum _dp_param_t,
        uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
    uint32_t lro_enable;
    uint32_t tcp_flag:9,
             tcp_flag_mask:9;
    uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
    uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
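
/*
 * Illustrative example (not part of the original header): a minimal
 * cdp_lro_hash_config as it might be populated before being handed to the
 * lro_hash_config() callback in struct ol_if_ops below. The TCP flag value
 * and mask shown are placeholders, and the QDF_TCPHDR_* macros plus
 * qdf_get_random_bytes() are assumed helpers for seeding the Toeplitz keys:
 *
 *     struct cdp_lro_hash_config lro_hash = {0};
 *
 *     lro_hash.lro_enable = 1;
 *     lro_hash.tcp_flag = QDF_TCPHDR_ACK;
 *     lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
 *                              QDF_TCPHDR_RST | QDF_TCPHDR_ACK;
 *     qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
 *                          sizeof(lro_hash.toeplitz_hash_ipv4));
 *     qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
 *                          sizeof(lro_hash.toeplitz_hash_ipv6));
 */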
struct ol_if_ops {
    void
    (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
        uint8_t pdev_id, uint8_t *peer_macaddr,
        uint8_t vdev_id,
        bool hash_based, uint8_t ring_num);
    QDF_STATUS
    (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
        uint8_t pdev_id,
        uint8_t vdev_id, uint8_t *peer_mac,
        qdf_dma_addr_t hw_qdesc, int tid,
        uint16_t queue_num,
        uint8_t ba_window_size_valid,
        uint16_t ba_window_size);
    QDF_STATUS
    (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
        uint8_t pdev_id,
        uint8_t vdev_id, uint8_t *peer_macaddr,
        uint32_t tid_mask);
    int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t pdev_id,
        uint8_t *peer_mac,
        uint8_t *vdev_mac, enum wlan_op_mode opmode);
    bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
    int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
        uint8_t vdev_id,
        uint8_t *peer_macaddr,
        uint16_t peer_id,
        const uint8_t *dest_macaddr,
        uint8_t *next_node_mac,
        uint32_t flags,
        uint8_t type);
    int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
        uint8_t vdev_id,
        uint8_t *dest_macaddr,
        uint8_t *peer_macaddr,
        uint32_t flags);
    void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
        uint8_t vdev_id,
        uint8_t *wds_macaddr,
        uint8_t type,
        uint8_t delete_in_fw);
    QDF_STATUS
    (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
        struct cdp_lro_hash_config *rx_offld_hash);
    void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
        uint8_t type);
#ifdef FEATURE_NAC_RSSI
    uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
        uint8_t pdev_id, void *msg);
#else
    uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
    int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint16_t peer_id, uint16_t hw_peer_id,
        uint8_t vdev_id, uint8_t *peer_mac_addr,
        enum cdp_txrx_ast_entry_type peer_type,
        uint32_t tx_ast_hashidx);
    int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint16_t peer_id,
        uint8_t vdev_id);
    int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
        enum cdp_cfg_param_type param_num);
    void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t pdev_id,
        struct cdp_rx_mic_err_info *info);
    bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t vdev_id, uint8_t *peer_mac_addr,
        qdf_nbuf_t nbuf,
        uint16_t hdr_space);
    uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t pdev_id, uint16_t freq);
    uint8_t (*freq_to_band)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t pdev_id, uint16_t freq);
    QDF_STATUS (*set_mec_timer)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t vdev_id, uint16_t mec_timer_val);
#ifdef ATH_SUPPORT_NAC_RSSI
    int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t pdev_id,
        u_int8_t vdev_id,
        enum cdp_nac_param_cmd cmd, char *bssid,
        char *client_macaddr, uint8_t chan_num);
    int
    (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t pdev_id, u_int8_t vdev_id,
        enum cdp_nac_param_cmd cmd,
        char *bssid, char *client_mac);
#endif
    int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint16_t pdev_id, uint8_t *peer_macaddr);

    /**
     * send_delba() - Send delba to peer
     * @psoc: Objmgr soc handle
     * @vdev_id: dp vdev id
     * @peer_macaddr: Peer mac addr
     * @tid: Tid number
     * @reason_code: Reason code
     * @cdp_rcode: CDP reason code for sending DELBA
     *
     * Return: 0 for success, non-zero for failure
     */
    int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
        uint8_t *peer_macaddr, uint8_t tid,
        uint8_t reason_code, uint8_t cdp_rcode);
    int
    (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t vdev_id,
        uint8_t *dest_macaddr,
        uint8_t *peer_macaddr,
        uint32_t flags);
    int
    (*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t *pdev_id,
        uint8_t *lmac_id,
        uint8_t *target_pdev_id);
    bool (*is_roam_inprogress)(uint32_t vdev_id);
    enum QDF_GLOBAL_MODE (*get_con_mode)(void);
#ifdef QCA_PEER_MULTIQ_SUPPORT
    int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
        uint16_t peer_id, uint8_t vdev_id,
        uint8_t *peer_mac_addr);
#endif
#ifdef DP_MEM_PRE_ALLOC
    void *(*dp_prealloc_get_context)(uint32_t ctxt_type);
    QDF_STATUS (*dp_prealloc_put_context)(uint32_t ctxt_type, void *vaddr);
    void *(*dp_prealloc_get_consistent)(uint32_t *size,
        void **base_vaddr_unaligned,
        qdf_dma_addr_t *paddr_unaligned,
        qdf_dma_addr_t *paddr_aligned,
        uint32_t align,
        uint32_t ring_type);
    void (*dp_prealloc_put_consistent)(qdf_size_t size,
        void *vaddr_unligned,
        qdf_dma_addr_t paddr);
    void (*dp_get_multi_pages)(uint32_t desc_type,
        size_t element_size,
        uint16_t element_num,
        struct qdf_mem_multi_page_t *pages,
        bool cacheable);
    void (*dp_put_multi_pages)(uint32_t desc_type,
        struct qdf_mem_multi_page_t *pages);
#endif
    int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
    char *(*get_device_name)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
        uint8_t pdev_id);
    QDF_STATUS (*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t vdev_id);
    int (*dp_rx_get_pending)(ol_txrx_soc_handle soc);
    void (*dp_rx_sched_refill_thread)(ol_txrx_soc_handle soc);
    /* TODO: Add any other control path calls required to OL_IF/WMA layer */
#ifdef QCA_SUPPORT_WDS_EXTENDED
    void (*rx_wds_ext_peer_learn)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
        uint16_t peer_id, uint8_t vdev_id,
        uint8_t *peer_macaddr);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
#ifdef WLAN_SUPPORT_MESH_LATENCY
    QDF_STATUS (*peer_update_mesh_latency_params)(
        struct cdp_ctrl_objmgr_psoc *psoc,
        uint8_t vdev_id, uint8_t *peer_mac, uint8_t tid,
        uint32_t service_interval_dl, uint32_t burst_size_dl,
        uint32_t service_interval_ul, uint32_t burst_size_ul,
        uint8_t add_or_sub, uint8_t ac);
#endif
    uint32_t (*dp_get_tx_inqueue)(ol_txrx_soc_handle soc);
};
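
/*
 * Illustrative sketch (not part of the original header): the ol_if_ops table
 * is provided by the control/OS interface layer and handed to the datapath
 * when the soc is initialized via the txrx_soc_init op in struct cdp_cmn_ops
 * above. The registration shape and the my_* callback names below are
 * hypothetical; only callbacks the platform actually supports need to be
 * populated:
 *
 *     static struct ol_if_ops my_ol_if_ops = {
 *         .peer_set_default_routing = my_peer_set_default_routing,
 *         .peer_rx_reorder_queue_setup = my_rx_reorder_queue_setup,
 *         .peer_rx_reorder_queue_remove = my_rx_reorder_queue_remove,
 *         .peer_map_event = my_peer_map_event,
 *         .peer_unmap_event = my_peer_unmap_event,
 *     };
 *
 *     soc->ops->cmn_drv_ops->txrx_soc_init(soc, ctrl_psoc, hif_handle,
 *                                          htc_handle, qdf_osdev,
 *                                          &my_ol_if_ops, device_id);
 */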
#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
 * @set_wmm_param: set wmm parameters
 * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
 * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
 * @hl_tdls_flag_reset: reset tdls flag for vdev
 * @tx_non_std: Allow the control-path SW to send data frames
 * @get_vdev_id: get vdev id
 * @set_wisa_mode: set wisa mode for a vdev
 * @txrx_data_stall_cb_register: register data stall callback
 * @txrx_data_stall_cb_deregister: deregister data stall callback
 * @txrx_post_data_stall_event: post data stall event
 * @runtime_suspend: ensure TXRX is ready to runtime suspend
 * @runtime_resume: ensure TXRX is ready to runtime resume
 * @get_opmode: get operation mode of vdev
 * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
 *                            marking first packet after wow wakeup
 * @update_mac_id: update mac_id for vdev
 * @flush_rx_frames: flush rx frames on the queue
 * @get_intra_bss_fwd_pkts_count: get the total tx and rx packets that have
 *                                been forwarded from the txrx layer without
 *                                going to upper layers
 * @pkt_log_init: handler to initialize packet log
 * @pkt_log_con_service: handler to connect packet log service
 * @get_num_rx_contexts: handler to get number of RX contexts
 * @register_pktdump_cb: register callback for different pktlog
 * @unregister_pktdump_cb: unregister callback for different pktlog
 * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
 * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
 * @vdev_inform_ll_conn: inform DP to add/delete a latency critical connection
 *                       for this particular vdev
 * @set_swlm_enable: Enable or Disable Software Latency Manager.
 * @is_swlm_enabled: Check if Software latency manager is enabled or not.
 * @display_txrx_hw_info: Dump the DP rings info
 *
 * Function pointers for miscellaneous soc/pdev/vdev related operations.
 */
struct cdp_misc_ops {
    uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id,
        uint16_t timer_value_sec);
    void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
        struct ol_tx_wmm_param_t wmm_param);
    void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id, int enable,
        int period, int txq_limit);
    void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id,
        int level, int tput_thresh,
        int tx_limit);
    void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id, bool flag);
    qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
        enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
    uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
    uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id);
    QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id, bool enable);
    QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id,
        data_stall_detect_cb cb);
    QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id,
        data_stall_detect_cb cb);
    void (*txrx_post_data_stall_event)(
        struct cdp_soc_t *soc_hdl,
        enum data_stall_log_event_indicator indicator,
        enum data_stall_log_event_type data_stall_type,
        uint32_t pdev_id, uint32_t vdev_id_bitmap,
        enum data_stall_log_recovery_type recovery_type);
    QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id);
    QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id);
    int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
    void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id, uint8_t value);
    void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
        uint8_t mac_id);
    void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
        void *peer, bool drop);
    A_STATUS (*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id,
        uint64_t *fwd_tx_packets,
        uint64_t *fwd_rx_packets);
    void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
        void *scn);
    void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id, void *scn);
    void (*pkt_log_exit)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
    int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
    void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
        ol_txrx_pktdump_cb tx_cb,
        ol_txrx_pktdump_cb rx_cb);
    void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id);
    void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id);
    void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id,
        unsigned long rx_packets,
        uint32_t time_in_ms,
        uint32_t high_th,
        uint32_t low_th);
    void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
        unsigned long tx_bytes,
        uint32_t time_in_ms,
        uint32_t high_th,
        uint32_t low_th);
    void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id);
    QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
        uint8_t pdev_id,
        struct cdp_txrx_ext_stats *req);
    QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id);
    void (*reset_rx_hw_ext_stats)(struct cdp_soc_t *soc_hdl);
    QDF_STATUS (*vdev_inform_ll_conn)(struct cdp_soc_t *soc_hdl,
        uint8_t vdev_id,
        enum vdev_ll_conn_actions action);
    QDF_STATUS (*set_swlm_enable)(struct cdp_soc_t *soc_hdl,
        uint8_t val);
    uint8_t (*is_swlm_enabled)(struct cdp_soc_t *soc_hdl);
    void (*display_txrx_hw_info)(struct cdp_soc_t *soc_hdl);
};
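
/*
 * Illustrative sketch (not part of the original header): the runtime PM path
 * is expected to ask the datapath whether it is ready before suspending, and
 * to resume it afterwards. The soc->ops->misc_ops dispatch path is an
 * assumption based on the cdp_* wrapper convention:
 *
 *     if (soc->ops->misc_ops->runtime_suspend(soc, pdev_id) !=
 *         QDF_STATUS_SUCCESS)
 *             return QDF_STATUS_E_AGAIN;
 *
 *     (later, on runtime resume)
 *     soc->ops->misc_ops->runtime_resume(soc, pdev_id);
 */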
  1199. /**
  1200. * struct cdp_ocb_ops - mcl ocb ops
  1201. * @set_ocb_chan_info: set OCB channel info
  1202. * @get_ocb_chan_info: get OCB channel info
  1203. *
  1204. * Function pointers for operations related to OCB.
  1205. */
  1206. struct cdp_ocb_ops {
  1207. void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1208. struct ol_txrx_ocb_set_chan ocb_set_chan);
  1209. struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
  1210. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1211. };
/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @find_peer_exist:
 * @find_peer_exist_on_vdev:
 * @find_peer_exist_on_other_vdev:
 * @peer_state_update:
 * @get_vdevid:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @update_ibss_add_peer_num_of_vdev:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
        QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        struct ol_txrx_desc_type *sta_desc);
        QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        struct qdf_mac_addr peer_addr);
        bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id,
                        uint8_t *peer_addr);
        bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        uint8_t *peer_addr);
        bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc,
                        uint8_t vdev_id,
                        uint8_t *peer_addr,
                        uint16_t max_bssid);
        QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc,
                        uint8_t *peer_addr,
                        enum ol_txrx_peer_state state);
        QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
                        uint8_t *vdev_id);
        struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
                        struct qdf_mac_addr peer_addr);
        QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
        uint8_t * (*peer_get_peer_mac_addr)(void *peer);
        int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        uint8_t *peer_mac);
        struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
        int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc,
                        uint8_t vdev_id,
                        int16_t peer_num_delta);
        void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
                        ol_txrx_vdev_peer_remove_cb callback,
                        void *callback_context, bool remove_last_peer);
        void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
                        ol_txrx_vdev_peer_remove_cb callback,
                        void *callback_context);
        void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        uint8_t *bss_addr);
        void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
                        uint8_t vdev_id);
        bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc,
                        uint8_t vdev_id,
                        uint8_t *peer_mac);
        void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
                        uint8_t vdev_id, bool restore_last_peer);
        void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, uint8_t *peer_addr);
        void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        uint8_t *peer_mac, bool val);
        void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        uint8_t *peer_mac, bool val);
        void (*peer_flush_frags)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, uint8_t *peer_mac);
};
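/*
 * Illustrative sketch (not part of this header): registering a station with
 * the datapath and then moving it to the authorized state through
 * cdp_peer_ops.  peer_ops, soc_hdl, pdev_id, peer_mac and sta_desc (a
 * struct ol_txrx_desc_type filled in by the caller) are assumptions about
 * the caller's context; OL_TXRX_PEER_STATE_AUTH is one of the
 * ol_txrx_peer_state values declared in the CDP headers.
 *
 *      QDF_STATUS status = QDF_STATUS_E_FAILURE;
 *
 *      if (peer_ops && peer_ops->register_peer)
 *              status = peer_ops->register_peer(soc_hdl, pdev_id, &sta_desc);
 *
 *      if (QDF_IS_STATUS_SUCCESS(status) && peer_ops->peer_state_update)
 *              status = peer_ops->peer_state_update(soc_hdl, peer_mac,
 *                                                   OL_TXRX_PEER_STATE_AUTH);
 */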
/**
 * struct cdp_mob_stats_ops - mcl mob stats ops
 * @clear_stats: handler to clear ol txrx stats
 * @stats: handler to write ol txrx stats into a caller-provided buffer
 */
struct cdp_mob_stats_ops {
        QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint8_t bitmap);
        int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: handler to get pn info from peer
 *
 * Function pointers for pmf related operations.
 */
struct cdp_pmf_ops {
        void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
                        uint8_t vdev_id, uint8_t **last_pn_valid,
                        uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
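/*
 * Illustrative sketch (not part of this header): reading PN replay info for
 * a peer through cdp_pmf_ops.  get_pn_info returns pointers into
 * datapath-owned storage via the double-pointer out parameters, so the
 * caller only borrows them.  pmf_ops, soc, peer_mac and vdev_id are assumed
 * to come from the caller.
 *
 *      uint8_t *last_pn_valid = NULL;
 *      uint64_t *last_pn = NULL;
 *      uint32_t *rmf_pn_replays = NULL;
 *
 *      if (pmf_ops && pmf_ops->get_pn_info)
 *              pmf_ops->get_pn_info(soc, peer_mac, vdev_id, &last_pn_valid,
 *                                   &last_pn, &rmf_pn_replays);
 */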
#endif
#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: hardcode the configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 *                      1 enabled, 0 disabled.
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 *                                   indicate whether mgmt over wmi is enabled,
 *                                   1 for enabled, 0 for disabled
 * @is_high_latency: get whether the device is a high or low latency device,
 *                   1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled.
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled.
 */
struct cdp_cfg_ops {
        void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
                        uint8_t disable_rx_fwd);
        void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
                        uint8_t val);
        struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
        void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, bool val);
        uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
        void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
        int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
        void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
                        void *param);
        void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
        void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
        void (*set_new_htt_msg_format)(uint8_t val);
        void (*set_peer_unmap_conf_support)(bool val);
        bool (*get_peer_unmap_conf_support)(void);
        void (*set_tx_compl_tsf64)(bool val);
        bool (*get_tx_compl_tsf64)(void);
};
/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
 * @register_pause_cb: handler to register tx pause callback
 * @set_desc_global_pool_size: handler to set global pool size
 * @dump_flow_pool_info: handler to dump global and flow pool info
 * @tx_desc_thresh_reached: handler to check whether the tx desc threshold
 *                          has been reached
 *
 * Function pointers for operations related to flow control
 */
struct cdp_flowctl_ops {
        QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
                        uint8_t pdev_id,
                        uint8_t vdev_id);
        void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
                        uint8_t pdev_id,
                        uint8_t vdev_id);
        QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
                        tx_pause_callback);
        void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
        void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
        bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id);
};
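/*
 * Illustrative sketch (not part of this header): mapping a tx flow pool for
 * a vdev and then dumping pool state through cdp_flowctl_ops.  flowctl_ops,
 * soc, pdev_id and vdev_id are assumed to be supplied by the caller.
 *
 *      QDF_STATUS status = QDF_STATUS_E_FAILURE;
 *
 *      if (flowctl_ops && flowctl_ops->flow_pool_map_handler)
 *              status = flowctl_ops->flow_pool_map_handler(soc, pdev_id,
 *                                                          vdev_id);
 *
 *      if (QDF_IS_STATUS_SUCCESS(status) && flowctl_ops->dump_flow_pool_info)
 *              flowctl_ops->dump_flow_pool_info(soc);
 */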
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare with watermark
 * @ll_set_tx_pause_q_depth: set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
        int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        tx_pause_callback flowcontrol);
        int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, uint32_t chan_freq);
        int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id,
                        enum netif_action_type action);
#else
        int (*register_tx_flow_control)(
                        struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id,
                        ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
                        ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
        int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id);
        void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        bool tx_resume);
        bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        struct qdf_mac_addr peer_addr,
                        unsigned int low_watermark,
                        unsigned int high_watermark_offset);
        int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        int pause_q_depth);
        void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
        void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        uint32_t reason, uint32_t pause_type);
        void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        uint32_t reason, uint32_t pause_type);
};
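/*
 * Illustrative sketch (not part of this header): pausing and later
 * unpausing a vdev through cdp_lflowctl_ops, as a host driver might do
 * around a channel switch.  l_flowctl_ops, soc_hdl and vdev_id are
 * assumptions about the caller's context; reason and pause_type are
 * placeholders for the driver's own pause-reason encoding.
 *
 *      if (l_flowctl_ops && l_flowctl_ops->vdev_pause)
 *              l_flowctl_ops->vdev_pause(soc_hdl, vdev_id, reason, pause_type);
 *
 *      (driver-specific work happens here)
 *
 *      if (l_flowctl_ops && l_flowctl_ops->vdev_unpause)
 *              l_flowctl_ops->vdev_unpause(soc_hdl, vdev_id, reason,
 *                                          pause_type);
 */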
/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
        void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, int period,
                        uint8_t *dutycycle_level);
        void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, int level);
};
#endif
#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 * @ipa_tx_buf_smmu_mapping: Create SMMU mappings for Tx buffers to IPA
 * @ipa_tx_buf_smmu_unmapping: Release SMMU mappings for Tx buffers to IPA
 */
struct cdp_ipa_ops {
        QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        bool uc_active, bool is_tx);
        QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint8_t *op_msg);
        QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        void (*ipa_uc_op_cb_type)
                        (uint8_t *op_msg, void *osif_ctxt),
                        void *usr_ctxt);
        void (*ipa_deregister_op_cb)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, qdf_nbuf_t skb);
        void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
                        uint32_t value);
#ifdef FEATURE_METERING
        QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        uint8_t reset_stats);
        QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint64_t quota_bytes);
#endif
        QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
        defined(CONFIG_IPA_WDI_UNIFIED_API)
        QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        void *ipa_i2w_cb, void *ipa_w2i_cb,
                        void *ipa_wdi_meter_notifier_cb,
                        uint32_t ipa_desc_size, void *ipa_priv,
                        bool is_rm_enabled, uint32_t *tx_pipe_handle,
                        uint32_t *rx_pipe_handle, bool is_smmu_enabled,
                        qdf_ipa_sys_connect_params_t *sys_in,
                        bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
        QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        void *ipa_i2w_cb, void *ipa_w2i_cb,
                        void *ipa_wdi_meter_notifier_cb,
                        uint32_t ipa_desc_size, void *ipa_priv,
                        bool is_rm_enabled, uint32_t *tx_pipe_handle,
                        uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
        QDF_STATUS (*ipa_cleanup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint32_t tx_pipe_handle,
                        uint32_t rx_pipe_handle);
        QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
                        qdf_ipa_client_type_t prod_client,
                        qdf_ipa_client_type_t cons_client,
                        uint8_t session_id, bool is_ipv6_enabled);
        QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
        QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_perf_level)(int client,
                        uint32_t max_supported_bw_mbps);
        bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        qdf_nbuf_t nbuf, bool *fwd_success);
        QDF_STATUS (*ipa_tx_buf_smmu_mapping)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_tx_buf_smmu_unmapping)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
};
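/*
 * Illustrative sketch (not part of this header): one plausible ordering a
 * host IPA glue layer could follow with these ops - fetch resources,
 * program the doorbell address, then enable the pipes.  The exact sequence
 * is owned by the host driver, not this header.  ipa_ops, soc_hdl and
 * pdev_id are assumed to come from the caller; error handling is
 * compressed to early returns.
 *
 *      if (!ipa_ops)
 *              return QDF_STATUS_E_INVAL;
 *
 *      if (QDF_IS_STATUS_ERROR(ipa_ops->ipa_get_resource(soc_hdl, pdev_id)) ||
 *          QDF_IS_STATUS_ERROR(ipa_ops->ipa_set_doorbell_paddr(soc_hdl,
 *                                                              pdev_id)))
 *              return QDF_STATUS_E_FAILURE;
 *
 *      return ipa_ops->ipa_enable_pipes(soc_hdl, pdev_id);
 */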
#endif
#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointer for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
        void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint32_t *queue_delay_microsec,
                        uint32_t *tx_delay_microsec, int category);
        void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint16_t *bin_values, int category);
        void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint16_t *out_packet_count,
                        uint16_t *out_packet_loss_count, int category);
        void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint32_t interval);
};
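/*
 * Illustrative sketch (not part of this header): sampling per-category tx
 * delay through cdp_tx_delay_ops, e.g. when computing TSM metrics.
 * delay_ops, soc_hdl, pdev_id and category are assumed to be supplied by
 * the caller.
 *
 *      uint32_t queue_delay_us = 0, tx_delay_us = 0;
 *
 *      if (delay_ops && delay_ops->tx_delay)
 *              delay_ops->tx_delay(soc_hdl, pdev_id, &queue_delay_us,
 *                                  &tx_delay_us, category);
 */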
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
        QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
};
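/*
 * Illustrative sketch (not part of this header): how a bus suspend path
 * might call into cdp_bus_ops and bail out if the datapath refuses to
 * suspend, so the caller can retry later.  bus_ops, soc_hdl and pdev_id
 * are assumptions about the caller's context.
 *
 *      if (bus_ops && bus_ops->bus_suspend &&
 *          QDF_IS_STATUS_ERROR(bus_ops->bus_suspend(soc_hdl, pdev_id)))
 *              return QDF_STATUS_E_AGAIN;
 */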
#endif
#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb:
 * @deregister_rx_offld_flush_cb:
 */
struct cdp_rx_offld_ops {
        void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
        void (*deregister_rx_offld_flush_cb)(void);
};
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: Handler to configure host rx monitor status ring
 * @txrx_get_cfr_rcc: Handler to get CFR mode
 * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode
 * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode
 * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode
 * @txrx_enable_mon_reap_timer: Enable/Disable reap timer of monitor status ring
 */
struct cdp_cfr_ops {
        void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool enable,
                        struct cdp_monitor_filter *filter_val);
        bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool enable);
        void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        struct cdp_cfr_rcc_stats *buf);
        void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool enable);
};
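/*
 * Illustrative sketch (not part of this header): enabling CFR RCC mode on a
 * pdev and reading the setting back through cdp_cfr_ops.  cfr_ops, soc_hdl
 * and pdev_id are assumed caller context.
 *
 *      bool rcc_enabled = false;
 *
 *      if (cfr_ops && cfr_ops->txrx_set_cfr_rcc)
 *              cfr_ops->txrx_set_cfr_rcc(soc_hdl, pdev_id, true);
 *
 *      if (cfr_ops && cfr_ops->txrx_get_cfr_rcc)
 *              rcc_enabled = cfr_ops->txrx_get_cfr_rcc(soc_hdl, pdev_id);
 */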
#endif
#ifdef WLAN_SUPPORT_MSCS
/**
 * struct cdp_mscs_ops - data path ops for MSCS
 * @mscs_peer_lookup_n_get_priority:
 */
struct cdp_mscs_ops {
        int (*mscs_peer_lookup_n_get_priority)(struct cdp_soc_t *soc,
                        uint8_t *src_mac,
                        uint8_t *dst_mac,
                        qdf_nbuf_t nbuf);
};
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
/**
 * struct cdp_mesh_latency_ops - data path ops for Mesh latency
 * @mesh_latency_update_peer_parameter:
 */
struct cdp_mesh_latency_ops {
        QDF_STATUS (*mesh_latency_update_peer_parameter)(
                        struct cdp_soc_t *soc,
                        uint8_t *dest_mac, uint32_t service_interval_dl,
                        uint32_t burst_size_dl, uint32_t service_interval_ul,
                        uint32_t burst_size_ul, uint16_t priority,
                        uint8_t add_or_sub);
};
#endif
struct cdp_ops {
        struct cdp_cmn_ops *cmn_drv_ops;
        struct cdp_ctrl_ops *ctrl_ops;
        struct cdp_me_ops *me_ops;
        struct cdp_mon_ops *mon_ops;
        struct cdp_host_stats_ops *host_stats_ops;
        struct cdp_wds_ops *wds_ops;
        struct cdp_raw_ops *raw_ops;
        struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
        struct cdp_misc_ops *misc_ops;
        struct cdp_peer_ops *peer_ops;
        struct cdp_ocb_ops *ocb_ops;
        struct cdp_mob_stats_ops *mob_stats_ops;
        struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
        struct cdp_cfg_ops *cfg_ops;
        struct cdp_flowctl_ops *flowctl_ops;
        struct cdp_lflowctl_ops *l_flowctl_ops;
        struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
        struct cdp_bus_ops *bus_ops;
        struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
        struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
        struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
        struct cdp_cfr_ops *cfr_ops;
#endif
#ifdef WLAN_SUPPORT_MSCS
        struct cdp_mscs_ops *mscs_ops;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
        struct cdp_mesh_latency_ops *mesh_latency_ops;
#endif
};
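/*
 * Illustrative sketch (not part of this header): the general shape of how a
 * datapath implementation publishes its handlers by filling in a
 * struct cdp_ops instance and installing it during soc attach.  The my_*
 * names are hypothetical; real implementations install their own statically
 * defined ops tables, and members guarded by config flags above are only
 * present when the corresponding feature is compiled in.
 *
 *      static struct cdp_cmn_ops my_cmn_ops = {
 *              (common handlers filled in here)
 *      };
 *
 *      static struct cdp_ops my_dp_ops = {
 *              .cmn_drv_ops = &my_cmn_ops,
 *              .ctrl_ops = &my_ctrl_ops,
 *              (remaining handler tables omitted)
 *      };
 */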
#endif