cdp_txrx_ops.h 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141
  1. /*
  2. * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
  3. *
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /**
  20. * @file cdp_txrx_ops.h
  21. * @brief Define the host data path converged API functions
  22. * called by the host control SW and the OS interface module
  23. */
  24. #ifndef _CDP_TXRX_CMN_OPS_H_
  25. #define _CDP_TXRX_CMN_OPS_H_
  26. #include <cdp_txrx_cmn_struct.h>
  27. #include <cdp_txrx_stats_struct.h>
  28. #include "cdp_txrx_handle.h"
  29. #include <cdp_txrx_mon_struct.h>
  30. #include "wlan_objmgr_psoc_obj.h"
  31. #ifdef IPA_OFFLOAD
  32. #ifdef CONFIG_IPA_WDI_UNIFIED_API
  33. #include <qdf_ipa_wdi3.h>
  34. #else
  35. #include <qdf_ipa.h>
  36. #endif
  37. #endif
/**
 * Bitmap values to indicate special handling of peer_delete;
 * passed as the @bitmap argument of the txrx_peer_delete op below.
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/

/**
 * struct cdp_cmn_ops - data-path ops common to all converged targets
 *
 * Function-pointer table filled in by each data-path implementation and
 * invoked through the soc handle.  Covers soc/pdev/vdev/peer life cycle,
 * AST (address search table) management, management-frame transmit,
 * block-ack session setup, DSCP-TID mapping and stats requests.
 *
 * NOTE(review): member order defines the struct layout consumed by the
 * implementations that populate this table — do not reorder.
 */
struct cdp_cmn_ops {
	/* soc/pdev/vdev attach-detach life cycle */
	int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode);
	/* @callback is invoked (with @cb_context) once the vdev is deleted */
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);
	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
		 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);

	/* peer life cycle; returned void * is an opaque peer handle */
	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr);
	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);

	/* AST (address search table) entry management */
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);
	void (*txrx_peer_del_ast)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, uint32_t flags);
	void *(*txrx_peer_ast_hash_find)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr);
	uint8_t (*txrx_peer_ast_get_pdev_id)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	uint8_t (*txrx_peer_ast_get_next_hop)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	void (*txrx_peer_ast_set_type)
		(ol_txrx_soc_handle soc, void *ast_hdl,
		 enum cdp_txrx_ast_entry_type type);

	/* @bitmap: CDP_PEER_DELETE_NO_SPECIAL / CDP_PEER_DO_NOT_START_UNMAP_TIMER */
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);

	/* monitor / NAC / capture configuration */
	int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
				     uint8_t smart_monitor);
	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	void (*txrx_set_nac)(struct cdp_peer *peer);
	void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);
	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
	void (*txrx_ath_getstats)(void *pdev,
				  struct cdp_dev_stats *stats, uint8_t type);
	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
				  u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
	void (*txrx_if_mgmt_drain)(void *ni, int force);
	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
				   void *osif_vdev,
				   struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			      qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
				  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
				  uint8_t use_6mbps, uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
				    ol_txrx_mgmt_tx_cb download_cb,
				    ol_txrx_mgmt_tx_cb ota_ack_cb,
				    void *ctxt);
	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
				    ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			     int max_subfrms_amsdu);
	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
				      struct ol_txrx_stats_req *req,
				      bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
				  uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);

	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to
	 * struct qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);

	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
					      uint8_t vdev_id);
	void (*txrx_soc_detach)(void *soc);

	/* 802.11 block-ack (ADDBA/DELBA) session handling per peer/tid */
	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
				    uint16_t tid, uint16_t batimeout,
				    uint16_t buffersize,
				    uint16_t startseqnum);
	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
				    uint8_t *dialogtoken, uint16_t *statuscode,
				    uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(void *peer_handle,
			     int tid, uint16_t reasoncode);
	void (*set_addba_response)(void *peer_handle,
				   uint8_t tid, uint16_t statuscode);
	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
					    uint16_t peer_id,
					    uint8_t *mac_addr);

	/* DSCP-to-TID mapping configuration */
	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
				      uint8_t map_id);
	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
				      uint8_t tos, uint8_t tid);
	int (*txrx_stats_request)(struct cdp_vdev *vdev,
				  struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
			     struct cdp_peer *peer_handle,
			     enum cdp_sec_type sec_type,
			     uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
					       struct cdp_config_params *params);

	/* opaque per-pdev / per-soc handle storage for upper layers */
	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
				   void *dp_txrx_hdl);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
				       void *dp_txrx_handle);

	/* bulk AST reset/flush */
	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl);
	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
					  void *vdev_hdl);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
					   uint32_t num_peers);

	/* fast-path transmit entry point */
	ol_txrx_tx_fp tx_send;
};
/**
 * struct cdp_ctrl_ops - control-path ops invoked by the host control SW
 *
 * Covers security/safemode knobs, encap/decap configuration, peer
 * authorization, inactivity tracking, mesh mode, REO destination ring
 * selection and WDI event (un)subscription.
 *
 * NOTE(review): member order defines the struct layout consumed by the
 * implementations that populate this table — do not reorder.
 */
struct cdp_ctrl_ops {
	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * send and receive packets. It works like open AUTH mode, HW will
	 * treat all packets as non-encrypt frames because no key installed.
	 * For rx fragmented frames, it bypasses all the rx defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */
	void
		(*txrx_set_safemode)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When set this flag, all the unencrypted frames
	 * received over a secure connection will be discarded
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				       u_int32_t authorize);
	bool
		(*txrx_set_inact_params)(struct cdp_pdev *pdev,
					 u_int16_t inact_check_interval,
					 u_int16_t inact_normal,
					 u_int16_t inact_overload);
	bool
		(*txrx_start_inact_timer)(
				struct cdp_pdev *pdev,
				bool enable);
	/**
	 * @brief Set the overload status of the radio
	 * @details
	 * Set the overload status of the radio, updating the inactivity
	 * threshold and inactivity count for each node.
	 *
	 * @param pdev - the data physical device object
	 * @param overload - whether the radio is overloaded or not
	 */
	void (*txrx_set_overload)(
			struct cdp_pdev *pdev,
			bool overload);
	/**
	 * @brief Check the inactivity status of the peer/node
	 *
	 * @param peer - pointer to the node's object
	 * @return true if the node is inactive; otherwise return false
	 */
	bool
		(*txrx_peer_is_inact)(void *peer);
	/**
	 * @brief Mark inactivity status of the peer/node
	 * @details
	 * If it becomes active, reset inactivity count to reload value;
	 * if the inactivity status changed, notify umac band steering.
	 *
	 * @param peer - pointer to the node's object
	 * @param inactive - whether the node is inactive or not
	 */
	void (*txrx_mark_peer_inact)(
			void *peer,
			bool inactive);
	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
	/**
	 * @brief setting mesh rx filter
	 * @details
	 * based on the bits enabled in the filter packets has to be dropped.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
				    enum cdp_vdev_param_type param,
				    uint32_t val);
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
			struct cdp_pdev *pdev,
			enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
			struct cdp_pdev *pdev);
	/* WDI event subscription; @event_cb_sub is an opaque subscriber */
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
				  uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
				    uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
					    uint8_t subtype, uint8_t tx_power);
	void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
				    enum cdp_pdev_param_type type,
				    uint8_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
			enum cdp_nac_param_cmd cmd, char *bssid,
			char *client_macaddr, uint8_t chan_num);
#endif
};
/**
 * struct cdp_me_ops - multicast enhancement (mcast-to-ucast) ops
 *
 * Descriptor pool management and conversion entry points for the
 * multicast-to-unicast feature.
 */
struct cdp_me_ops {
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);
	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
		(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
	/* convert mcast @wbuf into @newmaccnt unicast copies (one per MAC) */
	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
				       qdf_nbuf_t wbuf, u_int8_t newmac[][6],
				       uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
			    u_int8_t vdev_id, qdf_nbuf_t msdu);
};
/**
 * struct cdp_mon_ops - monitor-mode filter ops
 *
 * Setters take a pdev handle; the corresponding getters take a vdev
 * handle (asymmetry preserved from the original interface).
 */
struct cdp_mon_ops {
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
	/* HK advance monitor filter support */
	int (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};
/**
 * struct cdp_host_stats_ops - host-side statistics ops
 *
 * Query, print and reset entry points for the various host-maintained
 * stats (TSO, SG, checksum, LRO, ME, per-peer, FW/HTT stats).
 */
struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
				   struct ol_txrx_stats_req *req);
	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
				  void *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param msg_len - length of the stats buffer
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
					  uint32_t *stats_base,
					  uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);
	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);
	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);
	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
					struct ol_txrx_stats_req *req);
	void
		(*print_lro_stats)(struct cdp_vdev *vdev);
	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);
	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
				     uint32_t cap);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
				 uint32_t data_len);
};
/**
 * struct cdp_wds_ops - wireless distribution system (WDS) ops
 *
 * Per-vdev rx policy, per-peer tx policy and vdev WDS enable/disable.
 */
struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
					  u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
						  int wds_tx_ucast,
						  int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};
/**
 * struct cdp_raw_ops - raw-mode (802.11 frame format) ops
 */
struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
	/* raw-mode simulation: look up the AST entry for @pnbuf */
	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
				  qdf_nbuf_t *pnbuf,
				  struct cdp_raw_ast *raw_ast);
};
#ifdef CONFIG_WIN
/**
 * struct cdp_pflow_ops - WIN-only pdev parameter update op
 */
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
					     enum _ol_ath_param_t,
					     uint32_t, void *);
};
#endif /* CONFIG_WIN */
/* Toeplitz hash seed lengths, in 32-bit words */
#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	/* 9 bits each: TCP flags value and mask (bit-fields share one word) */
	uint32_t tcp_flag:9,
		 tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
/**
 * struct ol_if_ops - callbacks from the data path into the OL_IF/WMA layer
 *
 * Unlike the cdp_*_ops tables above, these are implemented by the control
 * layer and called by the data path (peer routing/reorder setup, WDS entry
 * maintenance, LRO hash config, event notifications).
 */
struct ol_if_ops {
	void (*peer_set_default_routing)(void *scn_handle,
			uint8_t *peer_macaddr, uint8_t vdev_id,
			bool hash_based, uint8_t ring_num);
	int (*peer_rx_reorder_queue_setup)(void *scn_handle,
			uint8_t vdev_id, uint8_t *peer_mac,
			qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num);
	int (*peer_rx_reorder_queue_remove)(void *scn_handle,
			uint8_t vdev_id, uint8_t *peer_macaddr,
			uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
			uint8_t *peer_macaddr);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	/* WDS (4-address) AST entry maintenance */
	int (*peer_add_wds_entry)(void *ol_soc_handle,
			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
			uint8_t *wds_macaddr);
	QDF_STATUS (*lro_hash_config)(void *scn_handle,
			struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
			uint8_t type);
	uint8_t (*rx_invalid_peer)(void *osif_pdev, void *msg);
	/* FW peer map/unmap event notifications */
	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id,
			uint16_t hw_peer_id, uint8_t vdev_id,
			uint8_t *peer_mac_addr,
			enum cdp_txrx_ast_entry_type peer_type);
	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);
	int (*get_dp_cfg_param)(void *ol_soc_handle,
			enum cdp_cfg_param_type param_num);
	/* @wh: opaque pointer to the frame's 802.11 header */
	void (*rx_mic_error)(void *ol_soc_handle,
			uint16_t vdev_id, void *wh);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);
	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
			u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
			u_int8_t vdev_id, enum cdp_nac_param_cmd cmd,
			char *bssid, char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
			u_int8_t vdev_id, enum cdp_nac_param_cmd cmd,
			char *bssid);
#endif
	int (*peer_sta_kickout)(void *osif_pdev, uint8_t *peer_macaddr);

	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
  621. #ifndef CONFIG_WIN
  622. /* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @set_wmm_param:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @get_tx_ack_stats:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 * @get_opmode:
 * @mark_first_wakeup_packet:
 * @update_mac_id:
 * @flush_rx_frames:
 * @get_intra_bss_fwd_pkts_count:
 * @pkt_log_init:
 * @pkt_log_con_service:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
						   uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			      struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
					   int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
						int level, int tput_thresh,
						int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
				 enum ol_tx_spec tx_spec,
				 qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
						 uint64_t *fwd_tx_packets,
						 uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev,
			 uint32_t *queue_delay_microsec,
			 uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			      uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
				uint16_t *out_packet_count,
				uint16_t *out_packet_loss_count,
				int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
					uint32_t interval);
};
  689. /**
  690. * struct cdp_pmf_ops - mcl protected management frame ops
  691. * @get_pn_info:
  692. */
  693. struct cdp_pmf_ops {
  694. void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
  695. uint64_t **last_pn, uint32_t **rmf_pn_replays);
  696. };
  697. /**
  698. * struct cdp_cfg_ops - mcl configuration ops
  699. * @set_cfg_rx_fwd_disabled:
  700. * @set_cfg_packet_log_enabled:
  701. * @cfg_attach:
  702. * @vdev_rx_set_intrabss_fwd:
  703. * @get_opmode:
  704. * @is_rx_fwd_disabled:
  705. * @tx_set_is_mgmt_over_wmi_enabled:
  706. * @is_high_latency:
  707. * @set_flow_control_parameters:
  708. */
  709. struct cdp_cfg_ops {
  710. void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
  711. uint8_t disable_rx_fwd);
  712. void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
  713. uint8_t val);
  714. struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
  715. void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
  716. uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
  717. void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
  718. int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
  719. void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
  720. void *param);
  721. void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
  722. void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
  723. };
  724. /**
  725. * struct cdp_flowctl_ops - mcl flow control
  726. * @register_pause_cb:
  727. * @set_desc_global_pool_size:
  728. * @dump_flow_pool_info:
  729. */
  730. struct cdp_flowctl_ops {
  731. QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
  732. struct cdp_pdev *pdev,
  733. uint8_t vdev_id);
  734. void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
  735. struct cdp_pdev *pdev,
  736. uint8_t vdev_id);
  737. QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
  738. tx_pause_callback);
  739. void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
  740. void (*dump_flow_pool_info)(void *);
  741. };
  742. /**
  743. * struct cdp_lflowctl_ops - mcl legacy flow control ops
  744. * @register_tx_flow_control:
  745. * @deregister_tx_flow_control_cb:
  746. * @flow_control_cb:
  747. * @get_tx_resource:
  748. * @ll_set_tx_pause_q_depth:
  749. * @vdev_flush:
  750. * @vdev_pause:
  751. * @vdev_unpause:
  752. */
  753. struct cdp_lflowctl_ops {
  754. #ifdef QCA_HL_NETDEV_FLOW_CONTROL
  755. int (*register_tx_flow_control)(struct cdp_soc_t *soc,
  756. tx_pause_callback flowcontrol);
  757. int (*set_vdev_tx_desc_limit)(u8 vdev_id, u8 chan);
  758. int (*set_vdev_os_queue_status)(u8 vdev_id,
  759. enum netif_action_type action);
  760. #else
  761. int (*register_tx_flow_control)(uint8_t vdev_id,
  762. ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
  763. ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
  764. #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
  765. int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
  766. void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
  767. bool (*get_tx_resource)(uint8_t sta_id,
  768. unsigned int low_watermark,
  769. unsigned int high_watermark_offset);
  770. int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
  771. void (*vdev_flush)(struct cdp_vdev *vdev);
  772. void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
  773. void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
  774. };
#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: fetch IPA uC resource info from the pdev
 * @ipa_set_doorbell_paddr: program the IPA doorbell physical address
 * @ipa_set_active: mark IPA uC tx or rx (@is_tx) active/inactive
 * @ipa_op_response: handle an IPA uC op response message @op_msg
 * @ipa_register_op_cb: register @ipa_uc_op_cb_type to be invoked with
 *	uC op messages and the caller's @usr_ctxt
 * @ipa_get_stat: trigger an IPA uC stats query on the pdev
 * @ipa_tx_data_frame: send @skb via the IPA data path; returns the nbuf
 *	on failure to consume it - presumably for caller to free;
 *	TODO confirm against callers
 * @ipa_set_uc_tx_partition_base: set the IPA uC tx partition base value
 *	in the cfg context
 * @ipa_uc_get_share_stats: (FEATURE_METERING) query uC sharing stats,
 *	optionally resetting them (@reset_stats)
 * @ipa_uc_set_quota: (FEATURE_METERING) set the uC quota in bytes
 * @ipa_enable_autonomy: enable IPA autonomy mode
 * @ipa_disable_autonomy: disable IPA autonomy mode
 * @ipa_setup: set up the IPA WDI pipes; the extended signature (smmu,
 *	sys_in params) is used when CONFIG_IPA_WDI_UNIFIED_API is defined
 * @ipa_cleanup: tear down the tx/rx pipes created by @ipa_setup
 * @ipa_setup_iface: register a (ifname, mac, session) interface with IPA
 * @ipa_cleanup_iface: unregister an interface from IPA
 * @ipa_enable_pipes: enable the IPA pipes
 * @ipa_disable_pipes: disable the IPA pipes
 * @ipa_set_perf_level: request @max_supported_bw_mbps bandwidth for
 *	IPA @client
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
		bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
		void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
		uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
		uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
		uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
		bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
		uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
		qdf_ipa_client_type_t prod_client,
		qdf_ipa_client_type_t cons_client,
		uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
		uint32_t max_supported_bw_mbps);
};
#endif
  832. /**
  833. * struct cdp_bus_ops - mcl bus suspend/resume ops
  834. * @bus_suspend:
  835. * @bus_resume:
  836. */
  837. struct cdp_bus_ops {
  838. QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
  839. QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
  840. };
  841. /**
  842. * struct cdp_ocb_ops - mcl ocb ops
  843. * @set_ocb_chan_info:
  844. * @get_ocb_chan_info:
  845. */
  846. struct cdp_ocb_ops {
  847. void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
  848. struct ol_txrx_ocb_set_chan ocb_set_chan);
  849. struct ol_txrx_ocb_chan_info *
  850. (*get_ocb_chan_info)(struct cdp_vdev *vdev);
  851. };
  852. /**
  853. * struct cdp_peer_ops - mcl peer related ops
  854. * @register_peer:
  855. * @clear_peer:
  856. * @cfg_attach:
  857. * @find_peer_by_addr:
  858. * @find_peer_by_addr_and_vdev:
  859. * @local_peer_id:
  860. * @peer_find_by_local_id:
  861. * @peer_state_update:
  862. * @get_vdevid:
  863. * @get_vdev_by_sta_id:
  864. * @register_ocb_peer:
  865. * @peer_get_peer_mac_addr:
  866. * @get_peer_state:
  867. * @get_vdev_for_peer:
  868. * @update_ibss_add_peer_num_of_vdev:
  869. * @remove_peers_for_vdev:
  870. * @remove_peers_for_vdev_no_lock:
  871. * @copy_mac_addr_raw:
  872. * @add_last_real_peer:
  873. * @last_assoc_received:
  874. * @last_disassoc_received:
  875. * @last_deauth_received:
  876. * @is_vdev_restore_last_peer:
  877. * @update_last_real_peer:
  878. */
  879. struct cdp_peer_ops {
  880. QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
  881. struct ol_txrx_desc_type *sta_desc);
  882. QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
  883. QDF_STATUS (*change_peer_state)(uint8_t sta_id,
  884. enum ol_txrx_peer_state sta_state,
  885. bool roam_synch_in_progress);
  886. void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
  887. u8 *peer_addr, uint8_t *peer_id,
  888. enum peer_debug_id_type debug_id);
  889. void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
  890. void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
  891. uint8_t *peer_addr, uint8_t *peer_id);
  892. void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
  893. struct cdp_vdev *vdev,
  894. uint8_t *peer_addr, uint8_t *peer_id);
  895. uint16_t (*local_peer_id)(void *peer);
  896. void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
  897. uint8_t local_peer_id);
  898. QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
  899. uint8_t *peer_addr,
  900. enum ol_txrx_peer_state state);
  901. QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
  902. struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
  903. uint8_t sta_id);
  904. QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
  905. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  906. int (*get_peer_state)(void *peer);
  907. struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
  908. int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
  909. int16_t peer_num_delta);
  910. void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
  911. ol_txrx_vdev_peer_remove_cb callback,
  912. void *callback_context, bool remove_last_peer);
  913. void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
  914. ol_txrx_vdev_peer_remove_cb callback,
  915. void *callback_context);
  916. void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
  917. void (*add_last_real_peer)(struct cdp_pdev *pdev,
  918. struct cdp_vdev *vdev, uint8_t *peer_id);
  919. qdf_time_t * (*last_assoc_received)(void *peer);
  920. qdf_time_t * (*last_disassoc_received)(void *peer);
  921. qdf_time_t * (*last_deauth_received)(void *peer);
  922. bool (*is_vdev_restore_last_peer)(void *peer);
  923. void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
  924. uint8_t *peer_id, bool restore_last_peer);
  925. void (*peer_detach_force_delete)(void *peer);
  926. };
  927. /**
  928. * struct cdp_ocb_ops - mcl ocb ops
  929. * @throttle_init_period:
  930. * @throttle_set_level:
  931. */
  932. struct cdp_throttle_ops {
  933. void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
  934. uint8_t *dutycycle_level);
  935. void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
  936. };
  937. /**
  938. * struct cdp_ocb_ops - mcl ocb ops
  939. * @clear_stats:
  940. * @stats:
  941. */
  942. struct cdp_mob_stats_ops {
  943. void (*clear_stats)(uint16_t bitmap);
  944. int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
  945. };
  946. #endif /* CONFIG_WIN */
#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl receive offload ops
 * @register_rx_offld_flush_cb: register the rx-offload flush callback
 * @deregister_rx_offld_flush_cb: deregister the rx-offload flush callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif
/**
 * struct cdp_ops - aggregate table of all CDP ops sub-tables; a target
 *	registers one instance to expose its data-path implementation
 * @cmn_drv_ops: common driver ops
 * @ctrl_ops: control-path ops
 * @me_ops: mcast-enhancement ops
 * @mon_ops: monitor-mode ops
 * @host_stats_ops: host statistics ops
 * @wds_ops: WDS ops
 * @raw_ops: raw-mode ops
 * @pflow_ops: pflow ops
 * @misc_ops: (MCL, !CONFIG_WIN) miscellaneous ops
 * @cfg_ops: (MCL) configuration ops
 * @flowctl_ops: (MCL) flow-control ops
 * @l_flowctl_ops: (MCL) legacy flow-control ops
 * @ipa_ops: (MCL, IPA_OFFLOAD) IPA data-path ops
 * @rx_offld_ops: (MCL, RECEIVE_OFFLOAD) receive-offload ops
 * @bus_ops: (MCL) bus suspend/resume ops
 * @ocb_ops: (MCL) OCB ops
 * @peer_ops: (MCL) peer ops
 * @throttle_ops: (MCL) thermal throttle ops
 * @mob_stats_ops: (MCL) mobile stats ops
 * @delay_ops: (MCL) tx delay ops
 * @pmf_ops: (MCL) protected management frame ops
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifndef CONFIG_WIN
	struct cdp_misc_ops *misc_ops;
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
	struct cdp_bus_ops *bus_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_throttle_ops *throttle_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_tx_delay_ops *delay_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
  987. #endif