cdp_txrx_ops.h

/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#include <wmi_unified_api.h>
#include <wdi_event_api.h>
#ifdef IPA_OFFLOAD
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif
/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1
struct hif_opaque_softc;
/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
#define CDP_DELBA_INTERVAL_MS 3000
/**
 * enum cdp_delba_rcode - CDP reason code for sending DELBA
 * @CDP_DELBA_REASON_NONE: None
 * @CDP_DELBA_2K_JUMP: Sending DELBA from 2k_jump_handle
 */
enum cdp_delba_rcode {
	CDP_DELBA_REASON_NONE = 0,
	CDP_DELBA_2K_JUMP,
};
/**
 * enum vdev_peer_protocol_enter_exit - whether ingress or egress
 * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress
 * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress
 *
 * whether ingress or egress
 */
enum vdev_peer_protocol_enter_exit {
	CDP_VDEV_PEER_PROTOCOL_IS_INGRESS,
	CDP_VDEV_PEER_PROTOCOL_IS_EGRESS
};
/**
 * enum vdev_peer_protocol_tx_rx - whether tx or rx
 * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx
 * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx
 *
 * whether tx or rx
 */
enum vdev_peer_protocol_tx_rx {
	CDP_VDEV_PEER_PROTOCOL_IS_TX,
	CDP_VDEV_PEER_PROTOCOL_IS_RX
};
/**
 * enum vdev_ll_conn_actions - Actions to inform vdev about
 * low latency connection.
 * @CDP_VDEV_LL_CONN_ADD: Add Low latency connection
 * @CDP_VDEV_LL_CONN_DEL: Delete Low latency connection
 */
enum vdev_ll_conn_actions {
	CDP_VDEV_LL_CONN_ADD,
	CDP_VDEV_LL_CONN_DEL
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id);
	QDF_STATUS (*txrx_vdev_attach)
	(struct cdp_soc_t *soc, uint8_t pdev_id,
	struct cdp_vdev_info *vdev_info);
	QDF_STATUS
	(*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
	ol_txrx_vdev_delete_cb callback,
	void *cb_context);
	QDF_STATUS (*txrx_pdev_attach)
	(ol_txrx_soc_handle soc,
	struct cdp_pdev_attach_params *params);
	int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void
	(*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
	int force);
	QDF_STATUS
	(*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
	int force);
	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @soc: soc dp handle
	 * @pdev_id: id of Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: QDF_STATUS
	 */
	QDF_STATUS
	(*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
	int force);
	QDF_STATUS
	(*txrx_peer_create)
	(ol_txrx_soc_handle soc, uint8_t vdev_id,
	uint8_t *peer_mac_addr, enum cdp_peer_type peer_type);
	QDF_STATUS
	(*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
	uint8_t *peer_mac,
	struct cdp_peer_setup_info *setup_info);
	QDF_STATUS
	(*txrx_cp_peer_del_response)
	(ol_txrx_soc_handle soc, uint8_t vdev_id,
	uint8_t *peer_mac_addr);
	QDF_STATUS
	(*txrx_peer_teardown)
	(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
	int (*txrx_peer_add_ast)
	(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
	uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
	uint32_t flags);
	int (*txrx_peer_update_ast)
	(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
	uint8_t *mac_addr, uint32_t flags);
	bool (*txrx_peer_get_ast_info_by_soc)
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	struct cdp_ast_entry_info *ast_entry_info);
	bool (*txrx_peer_get_ast_info_by_pdev)
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	uint8_t pdev_id,
	struct cdp_ast_entry_info *ast_entry_info);
	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	txrx_ast_free_cb callback,
	void *cookie);
	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	uint8_t pdev_id,
	txrx_ast_free_cb callback,
	void *cookie);
	QDF_STATUS
	(*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t *peer_mac, uint32_t bitmap);
	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint8_t smart_monitor);
	void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t *peer_mac,
	QDF_STATUS(*delete_cb)(
	uint8_t vdev_id,
	uint32_t peerid_cnt,
	uint16_t *peerid_list),
	uint32_t bitmap);
	void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id,
	ol_txrx_peer_unmap_sync_cb
	peer_unmap_sync);
	QDF_STATUS
	(*txrx_get_peer_mac_from_peer_id)
	(struct cdp_soc_t *cdp_soc,
	uint32_t peer_id, uint8_t *peer_mac);
	void
	(*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
	void
	(*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
	struct cdp_dev_stats *stats, uint8_t type);
	QDF_STATUS
	(*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
	u_int8_t *mem_status,
	u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
	uint8_t pdev_id);
	QDF_STATUS
	(*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
	int force);
	QDF_STATUS
	(*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
	uint32_t chan_mhz);
	QDF_STATUS
	(*txrx_set_privacy_filters)
	(struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
	uint32_t num);
	uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);
	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	QDF_STATUS
	(*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
	ol_osif_vdev_handle osif_vdev,
	struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
	qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
	qdf_nbuf_t tx_mgmt_frm, uint8_t type,
	uint8_t use_6mbps, uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	QDF_STATUS
	(*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
	uint8_t type,
	ol_txrx_mgmt_tx_cb download_cb,
	ol_txrx_mgmt_tx_cb ota_ack_cb,
	void *ctxt);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
	ol_txrx_data_tx_cb callback, void *ctxt);
	qdf_nbuf_t (*tx_send_exc)
	(ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list,
	struct cdp_tx_exception_metadata *tx_exc_metadata);
	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
	int max_subfrms_ampdu,
	int max_subfrms_amsdu);
	A_STATUS
	(*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
	struct ol_txrx_stats_req *req,
	bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
	int debug_specs);
	QDF_STATUS
	(*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
	uint8_t vdev_id);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
	uint8_t vdev_id);
	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @soc: datapath soc handle
	 * @pdev_id: pdev id
	 *
	 * Return: vdev_id
	 */
	uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
	uint8_t pdev_id);
	void (*txrx_soc_detach)(struct cdp_soc_t *soc);
	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(struct cdp_soc_t *soc);
	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 * @ctrl_psoc: Opaque Cp handle
	 * @hif_handle: Opaque hif handle
	 * @htc_handle: Opaque htc handle
	 * @qdf_osdev: QDF device handle
	 * @ol_ops: Offload (control path) callback table
	 * @device_id: Device id
	 *
	 * Return: Opaque DP soc handle
	 */
	void *(*txrx_soc_init)(struct cdp_soc_t *soc,
	struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	struct hif_opaque_softc *hif_handle,
	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
	struct ol_if_ops *ol_ops, uint16_t device_id);
	QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
	HTC_HANDLE htc_handle,
	qdf_device_t qdf_osdev,
	uint8_t pdev_id);
	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);
	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
	int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
	uint8_t *peer_mac,
	uint16_t vdev_id, uint8_t tid,
	int status);
	int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
	uint8_t *peer_mac,
	uint16_t vdev_id,
	uint8_t dialogtoken,
	uint16_t tid, uint16_t batimeout,
	uint16_t buffersize,
	uint16_t startseqnum);
	QDF_STATUS
	(*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
	uint8_t *peer_mac,
	uint16_t vdev_id, uint8_t tid,
	uint8_t *dialogtoken, uint16_t *statuscode,
	uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
	uint16_t vdev_id, int tid, uint16_t reasoncode);
	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @cdp_soc: soc handle
	 * @peer_mac: Peer mac address
	 * @vdev_id: vdev id
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
	uint16_t vdev_id,
	uint8_t tid, int status);
	QDF_STATUS
	(*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
	uint16_t vdev_id, uint8_t tid,
	uint16_t statuscode);
	QDF_STATUS
	(*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
	uint8_t vdev_id, uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void (*flush_cache_rx_queue)(void);
	QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
	uint8_t pdev_id,
	uint8_t map_id,
	uint8_t tos, uint8_t tid);
	QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
	uint8_t vdev_id,
	struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
	enum qdf_stats_verbosity_level level);
	QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
	void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
	QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
	uint8_t vdev_id, uint8_t *peermac,
	enum cdp_sec_type sec_type,
	uint32_t *rx_pn);
	QDF_STATUS(*set_key_sec_type)(struct cdp_soc_t *soc_handle,
	uint8_t vdev_id, uint8_t *peermac,
	enum cdp_sec_type sec_type,
	bool is_unicast);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
	struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
	void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
	void *dp_hdl);
	void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
	uint8_t vdev_id);
	QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint16_t size);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
	void *dp_txrx_handle);
	QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
	uint32_t lmac_id);
	QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc,
	uint8_t pdev_id, uint32_t lmac_id);
	QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
	uint8_t pdev_id, bool is_pdev_down);
	QDF_STATUS (*txrx_peer_reset_ast)
	(ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
	uint8_t *peer_macaddr, uint8_t vdev_id);
	QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
	uint8_t vdev_id);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
	uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
	uint8_t ac, uint32_t *value);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
	uint32_t num_peers,
	uint32_t max_ast_index,
	uint8_t peer_map_unmap_v);
	QDF_STATUS (*set_soc_param)(ol_txrx_soc_handle soc,
	enum cdp_soc_param_t param,
	uint32_t value);
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev
	 * to deliver pkt to stack.
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 * @stack_fn: pointer to - function pointer to deliver RX pkt to stack
	 * @osif_vdev: pointer to - osif vdev to deliver RX packet to.
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
	(ol_txrx_soc_handle soc,
	uint8_t vdev_id,
	ol_txrx_rx_fp *stack_fn,
	ol_osif_vdev_handle *osif_vdev);
	void (*set_rate_stats_ctx)(struct cdp_soc_t *soc,
	void *ctx);
	int (*txrx_classify_update)
	(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
	enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
	enum cdp_capabilities dp_caps);
	void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
	QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
	uint8_t pdev_id,
	void *buf);
	void* (*txrx_peer_get_rdkstats_ctx)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint8_t *mac_addr);
	QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
	uint8_t pdev_id);
	QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
	uint8_t pdev_id,
	uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint8_t pcp, uint8_t tid);
#ifdef QCA_MULTIPASS_SUPPORT
	QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint16_t vlan_id, uint16_t group_key);
#endif
	uint16_t (*get_peer_mac_list)
	(ol_txrx_soc_handle soc, uint8_t vdev_id,
	u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt,
	bool limit);
#ifdef QCA_SUPPORT_WDS_EXTENDED
	uint16_t (*get_wds_ext_peer_id)(ol_txrx_soc_handle soc,
	uint8_t vdev_id,
	uint8_t *mac);
	QDF_STATUS (*set_wds_ext_peer_rx)(ol_txrx_soc_handle soc,
	uint8_t vdev_id,
	uint8_t *mac,
	ol_txrx_rx_fp rx,
	ol_osif_peer_handle osif_peer);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
	void (*txrx_drain)(ol_txrx_soc_handle soc);
	int (*get_free_desc_poolsize)(struct cdp_soc_t *soc);
#ifdef WLAN_SYSFS_DP_STATS
	QDF_STATUS (*txrx_sysfs_fill_stats)(ol_txrx_soc_handle soc,
	char *buf, uint32_t buf_size);
	QDF_STATUS (*txrx_sysfs_set_stat_type)(ol_txrx_soc_handle soc,
	uint32_t stat_type,
	uint32_t mac_id);
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	void (*set_pkt_capture_mode)(struct cdp_soc_t *soc, bool val);
#endif
#ifdef FEATURE_RUNTIME_PM
	void (*set_rtpm_tput_policy)(struct cdp_soc_t *soc, bool val);
#endif
};
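/*
 * Illustrative sketch, not part of the API: converged callers normally do
 * not invoke these members directly but go through the cdp_* inline
 * wrappers (see cdp_txrx_cmn.h), which locate this table via the soc
 * handle and NULL-check each op before calling it. Assuming the usual
 * soc->ops->cmn_drv_ops wiring, a guarded call would look roughly like:
 *
 *	if (soc->ops->cmn_drv_ops->txrx_peer_create)
 *		status = soc->ops->cmn_drv_ops->txrx_peer_create(soc, vdev_id,
 *								 peer_mac,
 *								 peer_type);
 */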
struct cdp_ctrl_ops {
	int
	(*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc);
	int
	(*txrx_update_filter_neighbour_peers)(
	struct cdp_soc_t *soc, uint8_t vdev_id,
	uint32_t cmd, uint8_t *macaddr);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param soc_hdl - pointer to the soc object
	 * @param vdev_id - id of the virtual object
	 * @param peer_mac - mac address of the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return QDF_STATUS
	 */
	QDF_STATUS
	(*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id,
	uint8_t *peer_mac,
	u_int32_t authorize);
	bool
	(*txrx_peer_get_authorize)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id,
	uint8_t *peer_mac);
	void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id);
	int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl);
	QDF_STATUS
	(*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id,
	enum cdp_vdev_param_type param,
	cdp_config_param_type val);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param txrx_soc - soc handle
	 * @param pdev_id - id of physical device
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 * @return QDF_STATUS
	 */
	QDF_STATUS (*txrx_set_pdev_reo_dest)(
	struct cdp_soc_t *txrx_soc,
	uint8_t pdev_id,
	enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param txrx_soc - soc handle
	 * @param pdev_id - id of physical device
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
	struct cdp_soc_t *txrx_soc,
	uint8_t pdev_id);
	int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id,
	wdi_event_subscribe *event_cb_sub,
	uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id,
	wdi_event_subscribe *event_cb_sub,
	uint32_t event);
	int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id,
	uint8_t *peer_mac, uint8_t sec_idx);
	QDF_STATUS
	(*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint8_t subtype, uint8_t tx_power);
	/**
	 * txrx_set_pdev_param() - callback to set pdev parameter
	 * @soc: opaque soc handle
	 * @pdev_id: id of data path pdev handle
	 * @type: pdev parameter type
	 * @val: value to be set for the parameter
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc,
	uint8_t pdev_id,
	enum cdp_pdev_param_type type,
	cdp_config_param_type val);
	QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc,
	uint8_t pdev_id,
	enum cdp_pdev_param_type type,
	cdp_config_param_type *val);
	QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc,
	uint8_t vdev_id, uint8_t *peer_mac,
	enum cdp_peer_param_type param,
	cdp_config_param_type val);
	QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc,
	uint8_t vdev_id, uint8_t *peer_mac,
	enum cdp_peer_param_type param,
	cdp_config_param_type *val);
	void * (*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void (*txrx_peer_flush_frags)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t *peer_mac);
#ifdef VDEV_PEER_PROTOCOL_COUNT
	void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc,
	int8_t vdev_id,
	qdf_nbuf_t nbuf,
	bool is_egress,
	bool is_rx);
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc,
	uint8_t vdev_id,
	enum cdp_nac_param_cmd cmd,
	char *bssid,
	char *client_macaddr,
	uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc,
	uint8_t vdev_id,
	char *macaddr,
	uint8_t *rssi);
#endif
#ifdef WLAN_SUPPORT_SCS
	QDF_STATUS
	(*txrx_enable_scs_params) (
	struct cdp_soc_t *soc, struct qdf_mac_addr
	*macaddr,
	uint8_t vdev_id,
	bool is_active);
	QDF_STATUS
	(*txrx_record_scs_params) (
	struct cdp_soc_t *soc, struct qdf_mac_addr
	*macaddr,
	uint8_t vdev_id,
	struct cdp_scs_params *scs_params,
	uint8_t entry_ctr,
	uint8_t scs_sessions);
#endif
#ifdef WLAN_SUPPORT_MSCS
	QDF_STATUS
	(*txrx_record_mscs_params) (
	struct cdp_soc_t *soc, uint8_t *macaddr,
	uint8_t vdev_id,
	struct cdp_mscs_params *mscs_params,
	bool active);
#endif
	QDF_STATUS
	(*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac,
	bool is_unicast, uint32_t *key);
	QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	enum cdp_vdev_param_type param,
	cdp_config_param_type *val);
	int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc,
	uint8_t pdev_id,
	uint8_t *macaddr, uint8_t enb_dsb);
	QDF_STATUS
	(*calculate_delay_stats)(struct cdp_soc_t *cdp_soc,
	uint8_t vdev_id, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
	struct cdp_soc_t *soc, uint8_t pdev_id,
	uint32_t protocol_mask, uint16_t protocol_type,
	uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	void (*txrx_dump_pdev_rx_protocol_tag_stats)(
	struct cdp_soc_t *soc, uint8_t pdev_id,
	uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	QDF_STATUS (*txrx_set_rx_flow_tag)(
	struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
	struct cdp_rx_flow_info *flow_info);
	QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
	struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
	struct cdp_rx_flow_info *flow_info);
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc,
	uint8_t vdev_id, uint8_t *peer_mac,
	uint16_t vlan_id);
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
	ol_txrx_soc_handle soc, uint8_t pdev_id,
	bool is_rx_pkt_cap_enable, uint8_t is_tx_pkt_cap_enable,
	uint8_t *peer_mac);
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
	QDF_STATUS
	(*txrx_set_psoc_param)(struct cdp_soc_t *soc,
	enum cdp_psoc_param_type param,
	cdp_config_param_type val);
	QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc,
	enum cdp_psoc_param_type type,
	cdp_config_param_type *val);
#ifdef VDEV_PEER_PROTOCOL_COUNT
	/*
	 * Enable per-peer protocol counters
	 */
	void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc,
	int8_t vdev_id, bool enable);
	void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
	int8_t vdev_id, int mask);
	int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc,
	int8_t vdev_id);
	int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
	int8_t vdev_id);
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	void (*txrx_set_delta_tsf)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint32_t delta_tsf);
	QDF_STATUS (*txrx_set_tsf_ul_delay_report)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	bool enable);
	QDF_STATUS (*txrx_get_uplink_delay)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint32_t *val);
#endif
};
struct cdp_me_ops {
	void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc,
	uint8_t pdev_id);
	void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id);
	uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id,
	qdf_nbuf_t wbuf, u_int8_t newmac[][6],
	uint8_t newmaccnt, uint8_t tid,
	bool is_igmp);
};
struct cdp_mon_ops {
	QDF_STATUS (*txrx_reset_monitor_mode)
	(ol_txrx_soc_handle soc, uint8_t pdev_id, u_int8_t smart_monitor);
	QDF_STATUS (*txrx_deliver_tx_mgmt)
	(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf);
	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	struct cdp_monitor_filter *filter_val);
	/* Configure full monitor mode */
	QDF_STATUS
	(*config_full_mon_mode)(struct cdp_soc_t *soc, uint8_t val);
	QDF_STATUS (*soc_config_full_mon_mode)(struct cdp_pdev *cdp_pdev,
	uint8_t val);
};
struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
	struct ol_txrx_stats_req *req);
	QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
	uint8_t vdev_id);
	QDF_STATUS
	(*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
	struct cdp_stats_extd *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
	uint8_t pdev_id);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
	uint8_t pdev_id);
	QDF_STATUS
	(*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
	struct ol_txrx_stats_req *req);
	int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
	uint8_t pdev_id,
	uint8_t *addr, void *stats,
	uint32_t last_tx_rate_mcs,
	uint32_t stats_id);
	QDF_STATUS
	(*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
	uint8_t *addr,
	uint32_t cap, uint32_t copy_stats);
	QDF_STATUS
	(*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
	void *data,
	uint32_t data_len);
	QDF_STATUS
	(*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
	uint8_t pdev_id, void *data,
	uint16_t stats_id);
	QDF_STATUS
	(*txrx_get_peer_stats_param)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint8_t *peer_mac,
	enum cdp_peer_stats_type type,
	cdp_peer_stats_param_t *buf);
	QDF_STATUS
	(*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t *peer_mac,
	struct cdp_peer_stats *peer_stats);
	QDF_STATUS
	(*txrx_get_soc_stats)(struct cdp_soc_t *soc,
	struct cdp_soc_stats *soc_stats);
	QDF_STATUS
	(*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	uint8_t *peer_mac);
	QDF_STATUS
	(*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
	uint8_t vdev_id, uint8_t *peer_mac);
	int
	(*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
	void *buf, bool is_aggregate);
	int
	(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
	void *data, uint32_t len,
	uint32_t stats_id);
	int
	(*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
	uint8_t vdev_id,
	wmi_host_vdev_extd_stats *buffer);
	QDF_STATUS
	(*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
	uint8_t vdev_id, void *buf,
	uint16_t stats_id);
	int
	(*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
	void *buf);
	QDF_STATUS
	(*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
	struct cdp_pdev_stats *buf);
	int
	(*txrx_get_ratekbps)(int preamb, int mcs,
	int htflag, int gintval);
	QDF_STATUS
	(*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t *peer_mac, void *stats,
	uint32_t last_tx_rate_mcs,
	uint32_t stats_id);
#ifdef QCA_SUPPORT_SCAN_SPCL_VAP_STATS
	QDF_STATUS
	(*txrx_get_scan_spcl_vap_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
	struct cdp_scan_spcl_vap_stats *stats);
#endif
	QDF_STATUS
	(*txrx_get_peer_delay_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint8_t *peer_mac,
	struct cdp_delay_tid_stats *delay_stats);
	QDF_STATUS
	(*txrx_get_peer_jitter_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
	uint8_t vdev_id, uint8_t *peer_mac,
	struct cdp_peer_tid_stats *tid_stats);
};
struct cdp_wds_ops {
	QDF_STATUS
	(*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
	u_int32_t val);
	QDF_STATUS
	(*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
	uint8_t vdev_id, uint8_t *peer_mac,
	int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
	uint32_t val);
};
struct cdp_raw_ops {
	QDF_STATUS
	(*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
	qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
};
#ifdef PEER_FLOW_CONTROL
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
	uint8_t pdev_id,
	enum _dp_param_t,
	uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */
#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11
/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
	tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
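/*
 * Illustrative sketch, not part of the API: the datapath typically fills
 * this structure and hands it to the lro_hash_config() callback in
 * struct ol_if_ops below, which forwards it to the target. The field
 * values here are hypothetical; only qdf_get_random_bytes() is assumed
 * from QDF:
 *
 *	struct cdp_lro_hash_config lro_cfg = {0};
 *
 *	lro_cfg.lro_enable = 1;
 *	lro_cfg.tcp_flag = 0;       // TCP flags value to match
 *	lro_cfg.tcp_flag_mask = 0;  // mask applied before the comparison
 *	qdf_get_random_bytes(lro_cfg.toeplitz_hash_ipv4,
 *			     sizeof(lro_cfg.toeplitz_hash_ipv4));
 *	qdf_get_random_bytes(lro_cfg.toeplitz_hash_ipv6,
 *			     sizeof(lro_cfg.toeplitz_hash_ipv6));
 */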
struct ol_if_ops {
	void
	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	uint8_t pdev_id, uint8_t *peer_macaddr,
	uint8_t vdev_id,
	bool hash_based, uint8_t ring_num);
	QDF_STATUS
	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	uint8_t pdev_id,
	uint8_t vdev_id, uint8_t *peer_mac,
	qdf_dma_addr_t hw_qdesc, int tid,
	uint16_t queue_num,
	uint8_t ba_window_size_valid,
	uint16_t ba_window_size);
	QDF_STATUS
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	uint8_t pdev_id,
	uint8_t vdev_id, uint8_t *peer_macaddr,
	uint32_t tid_mask);
	int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t pdev_id,
	uint8_t *peer_mac,
	uint8_t *vdev_mac, enum wlan_op_mode opmode);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
	uint8_t vdev_id,
	uint8_t *peer_macaddr,
	uint16_t peer_id,
	const uint8_t *dest_macaddr,
	uint8_t *next_node_mac,
	uint32_t flags,
	uint8_t type);
	int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
	uint8_t vdev_id,
	uint8_t *dest_macaddr,
	uint8_t *peer_macaddr,
	uint32_t flags);
	void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
	uint8_t vdev_id,
	uint8_t *wds_macaddr,
	uint8_t type,
	uint8_t delete_in_fw);
	QDF_STATUS
	(*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
	struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
	uint8_t type);
#ifdef FEATURE_NAC_RSSI
	uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
	uint8_t pdev_id, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint16_t peer_id, uint16_t hw_peer_id,
	uint8_t vdev_id, uint8_t *peer_mac_addr,
	enum cdp_txrx_ast_entry_type peer_type,
	uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint16_t peer_id,
	uint8_t vdev_id);
	int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
	enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t pdev_id,
	struct cdp_rx_mic_err_info *info);
	bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t vdev_id, uint8_t *peer_mac_addr,
	qdf_nbuf_t nbuf,
	uint16_t hdr_space);
	uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t pdev_id, uint16_t freq);
	uint8_t (*freq_to_band)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t pdev_id, uint16_t freq);
	QDF_STATUS(*set_mec_timer)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t vdev_id, uint16_t mec_timer_val);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t pdev_id,
	u_int8_t vdev_id,
	enum cdp_nac_param_cmd cmd, char *bssid,
	char *client_macaddr, uint8_t chan_num);
	int
	(*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t pdev_id, u_int8_t vdev_id,
	enum cdp_nac_param_cmd cmd,
	char *bssid, char *client_mac);
#endif
	int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint16_t pdev_id, uint8_t *peer_macaddr);
	/**
	 * send_delba() - Send delba to peer
	 * @psoc: Objmgr soc handle
	 * @vdev_id: dp vdev id
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 * @reason_code: Reason code
	 * @cdp_rcode: CDP reason code for sending DELBA
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
	uint8_t *peer_macaddr, uint8_t tid,
	uint8_t reason_code, uint8_t cdp_rcode);
	int
	(*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t vdev_id,
	uint8_t *dest_macaddr,
	uint8_t *peer_macaddr,
	uint32_t flags);
	int
	(*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t *pdev_id,
	uint8_t *lmac_id,
	uint8_t *target_pdev_id);
	bool (*is_roam_inprogress)(uint32_t vdev_id);
	enum QDF_GLOBAL_MODE (*get_con_mode)(void);
#ifdef QCA_PEER_MULTIQ_SUPPORT
	int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
	uint16_t peer_id, uint8_t vdev_id,
	uint8_t *peer_mac_addr);
#endif
#ifdef DP_MEM_PRE_ALLOC
	void *(*dp_prealloc_get_context)(uint32_t ctxt_type);
	QDF_STATUS(*dp_prealloc_put_context)(uint32_t ctxt_type, void *vaddr);
	void *(*dp_prealloc_get_consistent)(uint32_t *size,
	void **base_vaddr_unaligned,
	qdf_dma_addr_t *paddr_unaligned,
	qdf_dma_addr_t *paddr_aligned,
	uint32_t align,
	uint32_t ring_type);
	void (*dp_prealloc_put_consistent)(qdf_size_t size,
	void *vaddr_unaligned,
	qdf_dma_addr_t paddr);
	void (*dp_get_multi_pages)(uint32_t desc_type,
	size_t element_size,
	uint16_t element_num,
	struct qdf_mem_multi_page_t *pages,
	bool cacheable);
	void (*dp_put_multi_pages)(uint32_t desc_type,
	struct qdf_mem_multi_page_t *pages);
#endif
	int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
	char *(*get_device_name)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	uint8_t pdev_id);
	QDF_STATUS(*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t vdev_id);
	int (*dp_rx_get_pending)(ol_txrx_soc_handle soc);
	void (*dp_rx_sched_refill_thread)(ol_txrx_soc_handle soc);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
#ifdef QCA_SUPPORT_WDS_EXTENDED
	void (*rx_wds_ext_peer_learn)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	uint16_t peer_id, uint8_t vdev_id,
	uint8_t *peer_macaddr);
#endif /* QCA_SUPPORT_WDS_EXTENDED */
#ifdef WLAN_SUPPORT_MESH_LATENCY
	QDF_STATUS(*peer_update_mesh_latency_params)(
	struct cdp_ctrl_objmgr_psoc *psoc,
	uint8_t vdev_id, uint8_t *peer_mac, uint8_t tid,
	uint32_t service_interval_dl, uint32_t burst_size_dl,
	uint32_t service_interval_ul, uint32_t burst_size_ul,
	uint8_t add_or_sub, uint8_t ac);
#endif
	uint32_t (*dp_get_tx_inqueue)(ol_txrx_soc_handle soc);
};
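/*
 * Illustrative sketch, not part of the API: the control path usually
 * defines one ol_if_ops instance with the callbacks it supports and hands
 * it to the txrx_soc_init() op declared earlier in this file. The callback
 * implementations named below are hypothetical:
 *
 *	static struct ol_if_ops my_ol_if_ops = {
 *		.peer_set_default_routing = my_peer_set_default_routing,
 *		.peer_rx_reorder_queue_setup = my_reorder_queue_setup,
 *		.lro_hash_config = my_lro_hash_config,
 *		.is_roam_inprogress = my_is_roam_inprogress,
 *	};
 */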
#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
 * @set_wmm_param: set wmm parameters
 * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
 * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
 * @hl_tdls_flag_reset: reset tdls flag for vdev
 * @tx_non_std: Allow the control-path SW to send data frames
 * @get_vdev_id: get vdev id
 * @set_wisa_mode: set wisa mode for a vdev
 * @txrx_data_stall_cb_register: register data stall callback
 * @txrx_data_stall_cb_deregister: deregister data stall callback
 * @txrx_post_data_stall_event: post data stall event
 * @runtime_suspend: ensure TXRX is ready to runtime suspend
 * @runtime_resume: ensure TXRX is ready to runtime resume
 * @get_opmode: get operation mode of vdev
 * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
 * marking first packet after wow wakeup
 * @update_mac_id: update mac_id for vdev
 * @flush_rx_frames: flush rx frames on the queue
 * @get_intra_bss_fwd_pkts_count: to get the total tx and rx packets that
 * has been forwarded from txrx layer
 * without going to upper layers
 * @pkt_log_init: handler to initialize packet log
 * @pkt_log_con_service: handler to connect packet log service
 * @get_num_rx_contexts: handler to get number of RX contexts
 * @register_pktdump_cb: register callback for different pktlog
 * @unregister_pktdump_cb: unregister callback for different pktlog
 * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
 * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
 *
 * @vdev_inform_ll_conn: inform DP to add/delete a latency critical connection
 * for this particular vdev.
 * @set_swlm_enable: Enable or Disable Software Latency Manager.
 * @is_swlm_enabled: Check if Software latency manager is enabled or not.
 * @display_txrx_hw_info: Dump the DP rings info
 *
 * Function pointers for miscellaneous soc/pdev/vdev related operations.
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id,
	uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id, int enable,
	int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id,
	int level, int tput_thresh,
	int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
	enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id,
	data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id,
	data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
	struct cdp_soc_t *soc_hdl,
	enum data_stall_log_event_indicator indicator,
	enum data_stall_log_event_type data_stall_type,
	uint32_t pdev_id, uint32_t vdev_id_bitmap,
	enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id);
	QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id);
	int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id, uint8_t value);
	void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
	uint8_t mac_id);
	void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	void *peer, bool drop);
	A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id,
	uint64_t *fwd_tx_packets,
	uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
	void *scn);
	void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id, void *scn);
	void (*pkt_log_exit)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
	void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	ol_txrx_pktdump_cb tx_cb,
	ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id);
	void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id);
	void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id,
	unsigned long rx_packets,
	uint32_t time_in_ms,
	uint32_t high_th,
	uint32_t low_th);
	void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
	unsigned long tx_bytes,
	uint32_t time_in_ms,
	uint32_t high_th,
	uint32_t low_th);
	void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id);
	QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id,
	struct cdp_txrx_ext_stats *req);
	QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id);
	void (*reset_rx_hw_ext_stats)(struct cdp_soc_t *soc_hdl);
	QDF_STATUS (*vdev_inform_ll_conn)(struct cdp_soc_t *soc_hdl,
	uint8_t vdev_id,
	enum vdev_ll_conn_actions action);
	QDF_STATUS (*set_swlm_enable)(struct cdp_soc_t *soc_hdl,
	uint8_t val);
	uint8_t (*is_swlm_enabled)(struct cdp_soc_t *soc_hdl);
	void (*display_txrx_hw_info)(struct cdp_soc_t *soc_hdl);
	uint32_t (*get_tx_rings_grp_bitmap)(struct cdp_soc_t *soc_hdl);
};
/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info: set OCB channel info
 * @get_ocb_chan_info: get OCB channel info
 *
 * Function pointers for operations related to OCB.
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
	struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
	struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
};
/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @find_peer_exist:
 * @find_peer_exist_on_vdev:
 * @find_peer_exist_on_other_vdev:
 * @peer_state_update:
 * @get_vdevid:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @update_ibss_add_peer_num_of_vdev:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct qdf_mac_addr peer_addr);
	bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t *peer_addr);
	bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_addr);
	bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t *peer_addr,
		uint16_t max_bssid);
	QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc,
		uint8_t *peer_addr,
		enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
		struct qdf_mac_addr peer_addr);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t vdev_id);
	bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t *peer_mac);
	void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t vdev_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, uint8_t *peer_addr);
	void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac, bool val);
	void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac, bool val);
	void (*peer_flush_frags)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, uint8_t *peer_mac);
};
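
/*
 * Usage sketch (illustrative): a typical MCL association path first
 * registers the STA peer and later moves it to the authenticated state.
 * The helper name is hypothetical, sta_desc setup is elided, and
 * OL_TXRX_PEER_STATE_AUTH is assumed from enum ol_txrx_peer_state.
 *
 *	static QDF_STATUS example_peer_assoc(struct cdp_soc_t *soc,
 *					     uint8_t pdev_id,
 *					     struct ol_txrx_desc_type *sta_desc,
 *					     uint8_t *peer_mac)
 *	{
 *		QDF_STATUS status;
 *
 *		if (!soc->ops->peer_ops)
 *			return QDF_STATUS_E_FAILURE;
 *
 *		status = soc->ops->peer_ops->register_peer(soc, pdev_id,
 *							   sta_desc);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		return soc->ops->peer_ops->peer_state_update(soc, peer_mac,
 *						OL_TXRX_PEER_STATE_AUTH);
 *	}
 */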

/**
 * struct cdp_mob_stats_ops - mcl mob stats ops
 * @clear_stats: handler to clear ol txrx stats
 * @stats: handler to update ol txrx stats
 */
struct cdp_mob_stats_ops {
	QDF_STATUS (*clear_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint8_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned int buf_len);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: handler to get pn info from peer
 *
 * Function pointers for pmf related operations.
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
		uint8_t vdev_id, uint8_t **last_pn_valid,
		uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
#endif /* DP_PEER_EXTENDED_API */

#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: attach the hard-coded configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 *                      1 enabled, 0 disabled
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 *                                   indicate whether mgmt over wmi is
 *                                   enabled, 1 for enabled, 0 for disabled
 * @is_high_latency: check whether the device is a high or low latency
 *                   device, 1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 *                      1 enabled, 0 disabled
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
		uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
		void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
	void (*set_tx_compl_tsf64)(bool val);
	bool (*get_tx_compl_tsf64)(void);
};
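
/*
 * Usage sketch (illustrative): during pdev configuration the control layer
 * typically attaches the cfg context first and then programs individual
 * flags through it. The wrapper name and cfg_param are hypothetical
 * placeholders, and soc->ops is assumed from the cdp common headers.
 *
 *	static struct cdp_cfg *example_cfg_init(struct cdp_soc_t *soc,
 *						qdf_device_t osdev,
 *						void *cfg_param)
 *	{
 *		struct cdp_cfg *cfg_pdev;
 *
 *		if (!soc->ops->cfg_ops || !soc->ops->cfg_ops->cfg_attach)
 *			return NULL;
 *
 *		cfg_pdev = soc->ops->cfg_ops->cfg_attach(osdev, cfg_param);
 *		if (!cfg_pdev)
 *			return NULL;
 *
 *		soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, 1);
 *		soc->ops->cfg_ops->set_flow_steering(cfg_pdev, 1);
 *		return cfg_pdev;
 *	}
 */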

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
 * @register_pause_cb: handler to register tx pause callback
 * @set_desc_global_pool_size: handler to set global pool size
 * @dump_flow_pool_info: handler to dump global and flow pool info
 * @tx_desc_thresh_reached: handler to check whether the tx descriptor
 *                          threshold has been reached on a vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
		tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
	bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id);
};
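
/*
 * Usage sketch (illustrative): for descriptor-based flow control the shim
 * layer usually registers its pause callback once per soc and then maps a
 * flow pool whenever a vdev comes up. The wrapper name is hypothetical and
 * soc->ops is assumed from the cdp common headers.
 *
 *	static QDF_STATUS example_flowctl_attach(struct cdp_soc_t *soc,
 *						 uint8_t pdev_id,
 *						 uint8_t vdev_id,
 *						 tx_pause_callback pause_cb)
 *	{
 *		QDF_STATUS status;
 *
 *		if (!soc->ops->flowctl_ops)
 *			return QDF_STATUS_E_FAILURE;
 *
 *		status = soc->ops->flowctl_ops->register_pause_cb(soc,
 *								  pause_cb);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		return soc->ops->flowctl_ops->flow_pool_map_handler(soc,
 *								    pdev_id,
 *								    vdev_id);
 *	}
 */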

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare with watermark
 * @ll_set_tx_pause_q_depth: set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, uint32_t chan_freq);
	int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		enum netif_action_type action);
#else
	int (*register_tx_flow_control)(
		struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		bool tx_resume);
	bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct qdf_mac_addr peer_addr,
		unsigned int low_watermark,
		unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
		int pause_q_depth);
	void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		uint32_t reason, uint32_t pause_type);
	void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		uint32_t reason, uint32_t pause_type);
};
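
/*
 * Usage sketch (illustrative): legacy flow control pauses a vdev's netif
 * queues around a disruptive operation and unpauses them with the same
 * reason code afterwards. The wrapper name, reason value and pause_type
 * value (0) are hypothetical placeholders.
 *
 *	static void example_vdev_quiesce(struct cdp_soc_t *soc,
 *					 uint8_t vdev_id, uint32_t reason)
 *	{
 *		if (!soc->ops->l_flowctl_ops)
 *			return;
 *
 *		soc->ops->l_flowctl_ops->vdev_pause(soc, vdev_id, reason, 0);
 *		... perform the disruptive operation ...
 *		soc->ops->l_flowctl_ops->vdev_unpause(soc, vdev_id, reason, 0);
 *	}
 */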

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, int period,
		uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, int level);
};
#endif /* DP_FLOW_CTL */

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: get IPA tx/rx resource details
 * @ipa_set_doorbell_paddr: set the IPA doorbell physical address
 * @ipa_set_active: set IPA uC active state
 * @ipa_op_response: handle an IPA uC operation response
 * @ipa_register_op_cb: register a callback for IPA uC operation messages
 * @ipa_get_stat: request IPA uC statistics
 * @ipa_tx_data_frame: send a data frame through the IPA tx path
 * @ipa_tx_buf_smmu_mapping: Create SMMU mappings for Tx buffers to IPA
 * @ipa_tx_buf_smmu_unmapping: Release SMMU mappings for Tx buffers to IPA
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		bool uc_active, bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	void (*ipa_deregister_op_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
		uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		void *ipa_i2w_cb, void *ipa_w2i_cb,
		void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv,
		bool is_rm_enabled, uint32_t *tx_pipe_handle,
		uint32_t *rx_pipe_handle, bool is_smmu_enabled,
		qdf_ipa_sys_connect_params_t *sys_in,
		bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		void *ipa_i2w_cb, void *ipa_w2i_cb,
		void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv,
		bool is_rm_enabled, uint32_t *tx_pipe_handle,
		uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint32_t tx_pipe_handle,
		uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
		qdf_ipa_client_type_t prod_client,
		qdf_ipa_client_type_t cons_client,
		uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_set_perf_level)(int client,
		uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		qdf_nbuf_t nbuf, bool *fwd_success);
	QDF_STATUS (*ipa_tx_buf_smmu_mapping)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*ipa_tx_buf_smmu_unmapping)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
};
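
/*
 * Usage sketch (illustrative): once the WDI pipes have been created via
 * ipa_setup()/ipa_setup_iface(), the pipes are enabled and ring autonomy
 * is handed to IPA. Error handling and the setup arguments (which differ
 * between the unified and legacy WDI variants above) are elided; the
 * wrapper name is hypothetical and soc->ops is assumed from the cdp
 * common headers.
 *
 *	static QDF_STATUS example_ipa_enable(struct cdp_soc_t *soc,
 *					     uint8_t pdev_id)
 *	{
 *		QDF_STATUS status;
 *
 *		if (!soc->ops->ipa_ops)
 *			return QDF_STATUS_E_FAILURE;
 *
 *		status = soc->ops->ipa_ops->ipa_enable_pipes(soc, pdev_id);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		return soc->ops->ipa_ops->ipa_enable_autonomy(soc, pdev_id);
 *	}
 */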

#endif /* IPA_OFFLOAD */

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointer for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint32_t *queue_delay_microsec,
		uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		uint16_t *out_packet_count,
		uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint32_t interval);
};

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
};
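
/*
 * Usage sketch (illustrative): the bus layer invokes bus_suspend before the
 * target is suspended and bus_resume on the way back up. The wrapper name
 * is hypothetical and soc->ops is assumed from the cdp common headers.
 *
 *	static QDF_STATUS example_bus_suspend(struct cdp_soc_t *soc,
 *					      uint8_t pdev_id)
 *	{
 *		if (!soc->ops->bus_ops || !soc->ops->bus_ops->bus_suspend)
 *			return QDF_STATUS_E_FAILURE;
 *
 *		return soc->ops->bus_ops->bus_suspend(soc, pdev_id);
 *	}
 */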

#endif /* DP_POWER_SAVE */

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb: register a callback used to flush pending
 *                              receive offload (e.g. GRO/LRO) aggregates
 * @deregister_rx_offld_flush_cb: deregister the receive offload flush
 *                                callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif /* RECEIVE_OFFLOAD */

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: Handler to configure host rx monitor status ring
 * @txrx_get_cfr_rcc: Handler to get CFR mode
 * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode
 * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode
 * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode
 * @txrx_enable_mon_reap_timer: Enable/disable the reap timer of the monitor
 *                              status ring
 */
struct cdp_cfr_ops {
	void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		bool enable,
		struct cdp_monitor_filter *filter_val);
	bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		bool enable);
	void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		struct cdp_cfr_rcc_stats *buf);
	void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		bool enable);
};
#endif /* WLAN_CFR_ENABLE && WLAN_ENH_CFR_ENABLE */

#ifdef WLAN_SUPPORT_MSCS
/**
 * struct cdp_mscs_ops - data path ops for MSCS
 * @mscs_peer_lookup_n_get_priority: look up the MSCS peer matching the
 *                                   source/destination MAC pair and derive
 *                                   the priority for the given frame
 */
struct cdp_mscs_ops {
	int (*mscs_peer_lookup_n_get_priority)(struct cdp_soc_t *soc,
		uint8_t *src_mac,
		uint8_t *dst_mac,
		qdf_nbuf_t nbuf);
};
#endif /* WLAN_SUPPORT_MSCS */

#ifdef WLAN_SUPPORT_MESH_LATENCY
/**
 * struct cdp_mesh_latency_ops - data path ops for Mesh latency
 * @mesh_latency_update_peer_parameter: update per-peer mesh latency
 *                                      parameters (service interval, burst
 *                                      size, priority) for a destination MAC
 */
struct cdp_mesh_latency_ops {
	QDF_STATUS (*mesh_latency_update_peer_parameter)(
		struct cdp_soc_t *soc,
		uint8_t *dest_mac, uint32_t service_interval_dl,
		uint32_t burst_size_dl, uint32_t service_interval_ul,
		uint32_t burst_size_ul, uint16_t priority,
		uint8_t add_or_sub);
};
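
/*
 * Usage sketch (illustrative): the latency-control path programs per-peer
 * service interval and burst size for both directions; add_or_sub selects
 * whether the parameters are added or removed. All argument values below
 * are hypothetical placeholders, and soc->ops is assumed from the cdp
 * common headers.
 *
 *	static QDF_STATUS example_mesh_latency_set(struct cdp_soc_t *soc,
 *						   uint8_t *peer_mac)
 *	{
 *		if (!soc->ops->mesh_latency_ops)
 *			return QDF_STATUS_E_FAILURE;
 *
 *		return soc->ops->mesh_latency_ops->
 *			mesh_latency_update_peer_parameter(soc, peer_mac,
 *							   5000, 250,
 *							   5000, 250,
 *							   3, 1);
 *	}
 */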

#endif /* WLAN_SUPPORT_MESH_LATENCY */

/*
 * struct cdp_ops - aggregate of all op-family tables exported by a data
 * path implementation; optional families are compiled in under their
 * respective feature flags and may be left NULL when unsupported.
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
	struct cdp_misc_ops *misc_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
	struct cdp_bus_ops *bus_ops;
	struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	struct cdp_cfr_ops *cfr_ops;
#endif
#ifdef WLAN_SUPPORT_MSCS
	struct cdp_mscs_ops *mscs_ops;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
	struct cdp_mesh_latency_ops *mesh_latency_ops;
#endif
};
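
/*
 * Usage sketch (illustrative): a data path implementation fills in one
 * struct cdp_ops instance at soc attach time, publishing only the op
 * families it supports; upper layers then dispatch through the soc handle,
 * checking each family pointer before use. All names below are
 * hypothetical, and the assignment assumes the soc handle exposes its ops
 * table as described in the cdp common headers.
 *
 *	static struct cdp_ops example_dp_ops = {
 *		.cmn_drv_ops = &example_cmn_ops,
 *		.host_stats_ops = &example_host_stats_ops,
 *	};
 *
 *	void example_soc_attach(struct cdp_soc_t *soc)
 *	{
 *		soc->ops = &example_dp_ops;
 *	}
 */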

#endif