/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1
struct hif_opaque_softc;

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		uint8_t vdev_id, enum wlan_op_mode op_mode);
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		void *cb_context);
	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @pdev: Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: None
	 */
	void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force);
	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
		struct cdp_ctrl_objmgr_peer *ctrl_peer);
	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	void (*txrx_cp_peer_del_response)
		(ol_txrx_soc_handle soc, struct cdp_vdev *vdev_hdl,
		uint8_t *peer_mac_addr);
	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		uint8_t *mac_addr, uint32_t flags);
	bool (*txrx_peer_get_ast_info_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		struct cdp_ast_entry_info *ast_entry_info);
	bool (*txrx_peer_get_ast_info_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		uint8_t pdev_id,
		struct cdp_ast_entry_info *ast_entry_info);
	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		txrx_ast_free_cb callback,
		void *cookie);
	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		uint8_t pdev_id,
		txrx_ast_free_cb callback,
		void *cookie);
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
	void (*txrx_vdev_flush_peers)(struct cdp_vdev *vdev, bool unmap_only);
	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
		uint8_t smart_monitor);
	void (*txrx_peer_delete_sync)(void *peer,
		QDF_STATUS(*delete_cb)(
			uint8_t vdev_id,
			uint32_t peerid_cnt,
			uint16_t *peerid_list),
		uint32_t bitmap);
	void (*txrx_peer_unmap_sync_cb_set)(struct cdp_pdev *pdev,
		QDF_STATUS(*unmap_resp_cb)(
			uint8_t vdev_id,
			uint32_t peerid_cnt,
			uint16_t *peerid_list));
	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev);
	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
		int16_t chan_noise_floor);
	void (*txrx_set_nac)(struct cdp_peer *peer);
	/**
	 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture
	 * @pdev: data path pdev handle
	 * @val: value of pdev_tx_capture
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		uint32_t peer_id, uint8_t *peer_mac);
	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
	void (*txrx_ath_getstats)(void *pdev,
		struct cdp_dev_stats *stats, uint8_t type);
	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
		u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
	void (*txrx_if_mgmt_drain)(void *ni, int force);
	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);
	uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
		void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
		struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
		qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
		qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
		uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
		ol_txrx_mgmt_tx_cb download_cb,
		ol_txrx_mgmt_tx_cb ota_ack_cb,
		void *ctxt);
	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
		ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
		int max_subfrms_amsdu);
	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
		struct ol_txrx_stats_req *req,
		bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
		uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t *(*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct
	 * qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @pdev: pdev handle
	 *
	 * Return: Handle to vdev
	 */
	struct cdp_vdev *
		(*txrx_get_mon_vdev_from_pdev)(struct cdp_pdev *pdev);
	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
		uint8_t vdev_id);
	void (*txrx_soc_detach)(void *soc);
	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(void *soc);
	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 * @htchdl: Opaque htc handle
	 * @hifhdl: Opaque hif handle
	 *
	 * Return: None
	 */
	void *(*txrx_soc_init)(void *soc,
		struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		struct hif_opaque_softc *hif_handle,
		HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
		struct ol_if_ops *ol_ops, uint16_t device_id);
	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(void *soc);
	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(void *soc);
	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
		int status);
	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
		uint16_t tid, uint16_t batimeout,
		uint16_t buffersize,
		uint16_t startseqnum);
	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
		uint8_t *dialogtoken, uint16_t *statuscode,
		uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(void *peer_handle,
		int tid, uint16_t reasoncode);
	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @peer_handle: Peer handle
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(void *peer_handle,
		uint8_t tid, int status);
	void (*set_addba_response)(void *peer_handle,
		uint8_t tid, uint16_t statuscode);
	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
		uint16_t peer_id, uint8_t *mac_addr);
	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
		uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle);
	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
		uint8_t tos, uint8_t tid);
	void (*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val);
	void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid);
	QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev,
		struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
		enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
		struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
		uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
		struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
		void *dp_txrx_hdl);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
		void *dp_txrx_handle);
	void (*map_pdev_to_lmac)(struct cdp_pdev *pdev_hdl,
		uint32_t lmac_id);
	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
		uint8_t *peer_macaddr, void *vdev_hdl);
	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
		void *vdev_hdl);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
		uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
		uint8_t ac, uint32_t *value);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
		uint32_t num_peers,
		uint32_t max_ast_index,
		bool peer_map_unmap_v2);
	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
		struct cdp_ctrl_objmgr_pdev *ctrl_pdev);
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return the stack delivery
	 * function and osif vdev used to deliver RX packets to the stack
	 * @vdev: vdev handle
	 * @stack_fn: filled with the function pointer that delivers RX
	 *            packets to the stack
	 * @osif_vdev: filled with the osif vdev that RX packets are
	 *             delivered to
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
		(struct cdp_vdev *vdev,
		ol_txrx_rx_fp *stack_fn,
		ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
		enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
		enum cdp_capabilities dp_caps);
	void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, void *ctx);
	void *(*get_rate_stats_ctx)(struct cdp_soc_t *soc);
	void (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
		struct cdp_pdev *pdev,
		void *buf);
	void (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
		struct cdp_pdev *pdev);
	QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_pdev *pdev,
		uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_pdev_tidmap_prty)(struct cdp_pdev *pdev, uint8_t prty);
	QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_vdev *vdev,
		uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_vdev_tidmap_prty)(struct cdp_vdev *vdev, uint8_t prty);
	QDF_STATUS (*set_vdev_tidmap_tbl_id)(struct cdp_vdev *vdev,
		uint8_t mapid);
#ifdef QCA_MULTIPASS_SUPPORT
	QDF_STATUS (*set_vlan_groupkey)(struct cdp_vdev *vdev_handle,
		uint16_t vlan_id, uint16_t group_key);
#endif
};
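
/*
 * Illustrative sketch only (not part of the shipped CDP API): converged
 * callers dispatch through the registered cdp_cmn_ops table and must
 * tolerate unimplemented entries.  The helper name and the way the ops
 * table is passed in are assumptions made for this example; the op mode
 * enumerator is assumed to come from cdp_txrx_cmn_struct.h.
 */
static inline QDF_STATUS
example_cdp_vdev_bringup(struct cdp_cmn_ops *cmn, struct cdp_pdev *pdev,
			 uint8_t *vdev_mac, uint8_t vdev_id)
{
	struct cdp_vdev *vdev;

	if (!cmn || !cmn->txrx_vdev_attach)
		return QDF_STATUS_E_FAILURE;

	vdev = cmn->txrx_vdev_attach(pdev, vdev_mac, vdev_id,
				     wlan_op_mode_sta);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	/*
	 * Peers created on this vdev are later removed with
	 * txrx_peer_delete(); passing CDP_PEER_DELETE_NO_SPECIAL as the
	 * bitmap requests the default delete behaviour.
	 */
	return QDF_STATUS_SUCCESS;
}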

struct cdp_ctrl_ops {
	int
	(*txrx_mempools_attach)(void *ctrl_pdev);
	int
	(*txrx_set_filter_neighbour_peers)(
		struct cdp_pdev *pdev,
		uint32_t val);
	int
	(*txrx_update_filter_neighbour_peers)(
		struct cdp_vdev *vdev,
		uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * sending and receiving packets. It works like open AUTH mode: HW
	 * treats all packets as non-encrypted frames because no key is
	 * installed. For rx fragmented frames, it bypasses all the rx
	 * defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */
	void
	(*txrx_set_safemode)(
		struct cdp_vdev *vdev,
		u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When this flag is set, all unencrypted frames
	 * received over a secure connection are discarded.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
	(*txrx_set_drop_unenc)(
		struct cdp_vdev *vdev,
		u_int32_t val);
	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
	(*txrx_set_tx_encap_type)(
		struct cdp_vdev *vdev,
		enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
	(*txrx_set_vdev_rx_decap_type)(
		struct cdp_vdev *vdev,
		enum htt_cmn_pkt_type val);
	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
	(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
	(*txrx_peer_authorize)(struct cdp_peer *peer,
		u_int32_t authorize);
	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
	/**
	 * @brief set the mesh rx filter
	 * @details
	 * Packets are dropped based on the bits enabled in the filter.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
		enum cdp_vdev_param_type param, uint32_t val);
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring number on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges from 1 to 4
	 */
	void (*txrx_set_pdev_reo_dest)(
		struct cdp_pdev *pdev,
		enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring number on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
		struct cdp_pdev *pdev);
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
		uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
		uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
		uint8_t subtype, uint8_t tx_power);
	/**
	 * txrx_set_pdev_param() - callback to set pdev parameter
	 * @pdev: data path pdev handle
	 * @type: pdev parameter type
	 * @val: value of the parameter
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
		enum cdp_pdev_param_type type,
		uint8_t val);
	void *(*txrx_get_pldev)(struct cdp_pdev *pdev);
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
		char *macaddr,
		uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
		bool is_unicast, uint32_t *key);
	uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev,
		enum cdp_vdev_param_type param);
	int (*enable_peer_based_pktlog)(struct cdp_pdev
		*txrx_pdev_handle, char *macaddr, uint8_t enb_dsb);
	void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
		struct cdp_pdev *txrx_pdev_handle,
		uint32_t protocol_mask, uint16_t protocol_type,
		uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	void (*txrx_dump_pdev_rx_protocol_tag_stats)(
		struct cdp_pdev *txrx_pdev_handle,
		uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	void (*txrx_peer_set_vlan_id)(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t *peer_mac,
		uint16_t vlan_id);
#endif
};
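
/*
 * Illustrative sketch only: control-path code reaches these knobs through
 * the registered cdp_ctrl_ops table.  The helper below shows the intended
 * calling pattern for the safemode and drop-unencrypted flags; the helper
 * name is an assumption made for this example.
 */
static inline void
example_cdp_set_open_mode(struct cdp_ctrl_ops *ctrl, struct cdp_vdev *vdev,
			  bool open_auth)
{
	if (!ctrl)
		return;

	/* In safemode the data path bypasses encrypt/decrypt handling. */
	if (ctrl->txrx_set_safemode)
		ctrl->txrx_set_safemode(vdev, open_auth ? 1 : 0);

	/* When set, unencrypted frames on a secure connection are dropped. */
	if (ctrl->txrx_set_drop_unenc)
		ctrl->txrx_set_drop_unenc(vdev, open_auth ? 0 : 1);
}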

struct cdp_me_ops {
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
		struct cdp_pdev *pdev,
		u_int16_t buf_count);
	u_int16_t
	(*tx_get_mcast_buf_allocated_marked)
		(struct cdp_pdev *pdev);
	void
	(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
	void
	(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
	uint16_t
	(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
		qdf_nbuf_t wbuf, u_int8_t newmac[][6],
		uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frames to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device that received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
		u_int8_t vdev_id, qdf_nbuf_t msdu);
};

struct cdp_mon_ops {
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
	void (*txrx_monitor_record_channel)
		(struct cdp_pdev *, int val);
};
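
/*
 * Illustrative sketch only: monitor filters are programmed through
 * cdp_mon_ops before monitor mode is enabled via
 * cdp_cmn_ops::txrx_set_monitor_mode.  The helper simply writes the same
 * value to all three filter knobs; the helper name is an assumption made
 * for this example.
 */
static inline void
example_cdp_monitor_set_filters(struct cdp_mon_ops *mon,
				struct cdp_pdev *pdev, u_int8_t val)
{
	if (!mon)
		return;

	if (mon->txrx_monitor_set_filter_ucast_data)
		mon->txrx_monitor_set_filter_ucast_data(pdev, val);
	if (mon->txrx_monitor_set_filter_mcast_data)
		mon->txrx_monitor_set_filter_mcast_data(pdev, val);
	if (mon->txrx_monitor_set_filter_non_data)
		mon->txrx_monitor_set_filter_non_data(pdev, val);
}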

struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
		struct ol_txrx_stats_req *req);
	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
		struct cdp_stats_extd *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param msg_len - length of the stats message
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t *(*txrx_get_stats_base)(struct cdp_pdev *pdev,
		uint32_t *stats_base, uint32_t msg_len, uint8_t type);
	void
	(*tx_print_tso_stats)(struct cdp_vdev *vdev);
	void
	(*tx_rst_tso_stats)(struct cdp_vdev *vdev);
	void
	(*tx_print_sg_stats)(struct cdp_vdev *vdev);
	void
	(*tx_rst_sg_stats)(struct cdp_vdev *vdev);
	void
	(*print_rx_cksum_stats)(struct cdp_vdev *vdev);
	void
	(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
	A_STATUS
	(*txrx_host_me_stats)(struct cdp_vdev *vdev);
	void
	(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
		struct ol_txrx_stats_req *req);
	void
	(*print_lro_stats)(struct cdp_vdev *vdev);
	void
	(*reset_lro_stats)(struct cdp_vdev *vdev);
	void
	(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
		uint32_t cap, uint32_t copy_stats);
	void
	(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
		uint32_t data_len);
	void
	(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
		uint16_t stats_id);
	struct cdp_peer_stats *
	(*txrx_get_peer_stats)(struct cdp_peer *peer);
	void
	(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
	void
	(*txrx_reset_peer_stats)(struct cdp_peer *peer);
	int
	(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
		bool is_aggregate);
	int
	(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
		void *data, uint32_t len,
		uint32_t stats_id);
	int
	(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
		void *buffer);
	void
	(*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf,
		uint16_t stats_id);
	int
	(*txrx_get_radio_stats)(struct cdp_pdev *pdev,
		void *buf);
	struct cdp_pdev_stats *
	(*txrx_get_pdev_stats)(struct cdp_pdev *pdev);
	int
	(*txrx_get_ratekbps)(int preamb, int mcs,
		int htflag, int gintval);
	void
	(*configure_rate_stats)(struct cdp_soc_t *soc,
		uint8_t val);
};
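
/*
 * Illustrative sketch only: host stats are read back through the
 * cdp_host_stats_ops table.  Returning the cdp_peer_stats pointer provided
 * by txrx_get_peer_stats() directly mirrors the op's own contract; the
 * helper name is an assumption made for this example.
 */
static inline struct cdp_peer_stats *
example_cdp_peer_stats(struct cdp_host_stats_ops *stats,
		       struct cdp_peer *peer)
{
	if (!stats || !stats->txrx_get_peer_stats)
		return NULL;

	return stats->txrx_get_peer_stats(peer);
}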

struct cdp_wds_ops {
	void
	(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
		u_int32_t val);
	void
	(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
		int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};

struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
		qdf_nbuf_t *pnbuf,
		struct cdp_raw_ast *raw_ast);
};

#ifdef PEER_FLOW_CONTROL
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
		enum _ol_ath_param_t, uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with the TCP flags mask
 * below, the packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
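
/*
 * Illustrative sketch only: how a caller might populate
 * cdp_lro_hash_config before handing it to ol_if_ops::lro_hash_config.
 * The helper name and the specific flag values are placeholders chosen for
 * this example, not recommended settings; real callers fill the toeplitz
 * seed arrays with random bytes.
 */
static inline void
example_cdp_fill_lro_config(struct cdp_lro_hash_config *cfg)
{
	int i;

	cfg->lro_enable = 1;
	/* 9-bit TCP flag value/mask pair used for rx_offld eligibility */
	cfg->tcp_flag = 0x010;		/* placeholder: ACK */
	cfg->tcp_flag_mask = 0x1ff;	/* compare all nine flag bits */

	/* Placeholder seeds; production code uses random seed material. */
	for (i = 0; i < LRO_IPV4_SEED_ARR_SZ; i++)
		cfg->toeplitz_hash_ipv4[i] = 0;
	for (i = 0; i < LRO_IPV6_SEED_ARR_SZ; i++)
		cfg->toeplitz_hash_ipv6[i] = 0;
}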

struct ol_if_ops {
	void
	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		uint8_t *peer_macaddr, uint8_t vdev_id,
		bool hash_based, uint8_t ring_num);
	QDF_STATUS
	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		uint8_t vdev_id, uint8_t *peer_mac,
		qdf_dma_addr_t hw_qdesc, int tid,
		uint16_t queue_num,
		uint8_t ba_window_size_valid,
		uint16_t ba_window_size);
	QDF_STATUS
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		uint8_t vdev_id, uint8_t *peer_macaddr,
		uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t *peer_mac,
		uint8_t *vdev_mac, enum wlan_op_mode opmode,
		void *old_peer, void *new_peer);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *vdev_handle,
		struct cdp_peer *peer_handle,
		const uint8_t *dest_macaddr,
		uint8_t *next_node_mac,
		uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
		uint8_t *dest_macaddr, uint8_t *peer_macaddr,
		uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
		uint8_t *wds_macaddr,
		uint8_t type);
	QDF_STATUS
	(*lro_hash_config)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
		uint8_t type);
#ifdef FEATURE_NAC_RSSI
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
		uint16_t peer_id, uint16_t hw_peer_id,
		uint8_t vdev_id, uint8_t *peer_mac_addr,
		enum cdp_txrx_ast_entry_type peer_type,
		uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
		uint16_t peer_id,
		uint8_t vdev_id);
	int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
		enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(void *ol_soc_handle,
		struct cdp_rx_mic_err_info *info);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
		qdf_nbuf_t nbuf,
		uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);
	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
		u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
		char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);
	/**
	 * send_delba() - Send delba to peer
	 * @pdev_handle: Dp pdev handle
	 * @ctrl_peer: Peer handle
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 * @vdev_handle: Dp vdev handle
	 * @reason_code: Reason code carried in the delba
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(void *pdev_handle, void *ctrl_peer,
		uint8_t *peer_macaddr, uint8_t tid, void *vdev_handle,
		uint8_t reason_code);
	int (*peer_delete_multiple_wds_entries)(void *vdev_handle,
		uint8_t *dest_macaddr,
		uint8_t *peer_macaddr,
		uint32_t flags);
	bool (*is_roam_inprogress)(uint32_t vdev_id);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
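
/*
 * Illustrative sketch only: the OL_IF/WMA layer provides its callbacks by
 * filling an ol_if_ops instance and passing it to
 * cdp_cmn_ops::txrx_soc_init.  Only the wiring pattern is shown; the
 * helper name and the caller-supplied kickout callback are assumptions
 * made for this example, and entries left NULL must be checked by the
 * data path before use.
 */
static inline void
example_ol_if_ops_fill(struct ol_if_ops *ol_ops,
		       int (*kickout_cb)(void *ctrl_pdev,
					 uint8_t *peer_macaddr))
{
	ol_ops->peer_sta_kickout = kickout_cb;
}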

#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 * @register_pktdump_cb:
 * @unregister_pktdump_cb:
 * @pdev_reset_driver_del_ack:
 * @vdev_set_driver_del_ack_enable:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
		uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
		struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
		int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
		int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(struct cdp_pdev *pdev, uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
		enum data_stall_log_event_indicator indicator,
		enum data_stall_log_event_type data_stall_type,
		uint32_t pdev_id, uint32_t vdev_id_bitmap,
		enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
		uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc);
	void (*register_pktdump_cb)(ol_txrx_pktdump_cb tx_cb,
		ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(void);
	void (*pdev_reset_driver_del_ack)(struct cdp_pdev *ppdev);
	void (*vdev_set_driver_del_ack_enable)(uint8_t vdev_id,
		unsigned long rx_packets,
		uint32_t time_in_ms,
		uint32_t high_th,
		uint32_t low_th);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
		struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
	(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @cfg_attach:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
		struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
		enum ol_txrx_peer_state sta_state,
		bool roam_synch_in_progress);
	void *(*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
		uint8_t *peer_addr, uint8_t *peer_id,
		enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void *(*find_peer_by_addr)(struct cdp_pdev *pdev,
		uint8_t *peer_addr, uint8_t *peer_id);
	void *(*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev,
		uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void *(*peer_find_by_local_id)(struct cdp_pdev *pdev,
		uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
		uint8_t *peer_addr,
		enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev *(*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
		uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t *(*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev *(*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
		int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
		ol_txrx_vdev_peer_remove_cb callback,
		void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
		struct cdp_vdev *vdev, uint8_t *peer_id);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *vdev,
		uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
};

/**
 * struct cdp_mob_stats_ops - mcl mobile stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
		uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
#endif

#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: hardcode the configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 * 1 enabled, 0 disabled.
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 * indicate whether mgmt over wmi is enabled,
 * 1 for enabled, 0 for disabled
 * @is_high_latency: check whether the device is a high or low latency
 * device, 1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 * 1 enabled, 0 disabled.
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 * 1 enabled, 0 disabled.
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
		uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
		uint8_t val);
	struct cdp_cfg *(*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
		void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
	void (*set_tx_compl_tsf64)(bool val);
	bool (*get_tx_compl_tsf64)(void);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler:
 * @flow_pool_unmap_handler:
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 * @tx_desc_thresh_reached:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
		struct cdp_pdev *pdev,
		uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
		struct cdp_pdev *pdev,
		uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
		tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(void *);
	bool (*tx_desc_thresh_reached)(struct cdp_vdev *vdev);
};
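
/*
 * Illustrative sketch only: typical MCL flow-control setup sizes the global
 * tx descriptor pool and can dump pool state for debugging.  The helper
 * name and the idea of passing an opaque soc handle to
 * dump_flow_pool_info() are assumptions made for this example.
 */
static inline void
example_cdp_flowctl_setup(struct cdp_flowctl_ops *fc, void *soc_handle,
			  uint32_t num_msdu_desc)
{
	if (!fc)
		return;

	if (fc->set_desc_global_pool_size)
		fc->set_desc_global_pool_size(num_msdu_desc);

	if (fc->dump_flow_pool_info)
		fc->dump_flow_pool_info(soc_handle);
}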

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
		tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(uint8_t vdev_id, uint8_t chan);
	int (*set_vdev_os_queue_status)(uint8_t vdev_id,
		enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
		ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
		ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
		unsigned int low_watermark,
		unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
		uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};
#endif

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
		bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
		uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
		uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
		uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
		bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in,
		bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
		uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
		qdf_ipa_client_type_t prod_client,
		qdf_ipa_client_type_t cons_client,
		uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
		uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf,
		bool *fwd_success);
};
#endif

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
		uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
		uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
		uint16_t *out_packet_count,
		uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
		uint32_t interval);
};

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb:
 * @deregister_rx_offld_flush_cb:
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif

struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
	struct cdp_misc_ops *misc_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
	struct cdp_bus_ops *bus_ops;
	struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
};
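
/*
 * Illustrative sketch only: cdp_ops aggregates every ops table a data-path
 * implementation registers.  Converged callers are expected to check both
 * the table pointer and the individual member before dispatching, as shown
 * below; the helper name is an assumption made for this example.
 */
static inline int
example_cdp_get_tx_pending(struct cdp_ops *ops, struct cdp_pdev *pdev)
{
	if (!ops || !ops->cmn_drv_ops ||
	    !ops->cmn_drv_ops->txrx_get_tx_pending)
		return 0;

	return ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
}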
#endif /* _CDP_TXRX_CMN_OPS_H_ */