
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL		0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER	1
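/*
 * Illustrative sketch (not part of this header): the bitmap above is passed
 * to the txrx_peer_delete op declared in struct cdp_cmn_ops below. The
 * helper name and the assumption that ol_txrx_soc_handle carries an "ops"
 * pointer (see struct cdp_ops at the end of this file) are for the example
 * only.
 *
 *   void example_peer_delete(ol_txrx_soc_handle soc, void *peer)
 *   {
 *           // Delete the peer, asking the DP layer not to arm the unmap
 *           // timer while doing so.
 *           soc->ops->cmn_drv_ops->txrx_peer_delete(peer,
 *                   CDP_PEER_DO_NOT_START_UNMAP_TIMER);
 *   }
 */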

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode);
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);
	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
		 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr);
	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);
	void (*txrx_peer_del_ast)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 void *ast_hdl, uint32_t flags);
	void *(*txrx_peer_ast_hash_find)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr);
	uint8_t (*txrx_peer_ast_get_pdev_id)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	uint8_t (*txrx_peer_ast_get_next_hop)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	void (*txrx_peer_ast_set_type)
		(ol_txrx_soc_handle soc, void *ast_hdl,
		 enum cdp_txrx_ast_entry_type type);
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
	int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
			uint8_t smart_monitor);
	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	void (*txrx_set_nac)(struct cdp_peer *peer);
	void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);
	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
	void (*txrx_ath_getstats)(struct cdp_pdev *pdev,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
			struct rtnl_link_stats64 *stats);
#else
			struct net_device_stats *stats);
#endif
	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
			u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
	void (*txrx_if_mgmt_drain)(void *ni, int force);
	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
			void *osif_vdev, struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
			uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
			ol_txrx_mgmt_tx_cb download_cb,
			ol_txrx_mgmt_tx_cb ota_ack_cb,
			void *ctxt);
	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
			ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			int max_subfrms_amsdu);
	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req,
			bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
			uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct
	 * qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
			uint8_t vdev_id);
	void (*txrx_soc_detach)(void *soc);
	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
			uint16_t tid, uint16_t batimeout, uint16_t buffersize,
			uint16_t startseqnum);
	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
			uint8_t *dialogtoken, uint16_t *statuscode,
			uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(void *peer_handle,
			int tid, uint16_t reasoncode);
	void (*set_addba_response)(void *peer_handle,
			uint8_t tid, uint16_t statuscode);
	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
			uint16_t peer_id, uint8_t *mac_addr);
	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
			uint8_t map_id);
	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
			uint8_t tos, uint8_t tid);
	int (*txrx_stats)(struct cdp_vdev *vdev, enum cdp_stats stats);
	int (*txrx_stats_request)(struct cdp_vdev *vdev,
			struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
			enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
			struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
			uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
			struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
			void *dp_txrx_hdl);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
			void *dp_txrx_handle);
	ol_txrx_tx_fp tx_send;
};
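/*
 * Illustrative sketch (not part of this header): callers normally reach
 * these ops through the cdp_ops aggregate defined at the end of this file,
 * e.g. soc->ops->cmn_drv_ops. The helper name, and the assumption that
 * ol_txrx_soc_handle carries an "ops" pointer, are for the example only.
 *
 *   static inline struct cdp_vdev *
 *   example_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
 *                       uint8_t *mac, uint8_t vdev_id)
 *   {
 *           // Dispatch to the converged DP implementation registered in
 *           // cmn_drv_ops; a real wrapper would also null-check the ops.
 *           return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, mac,
 *                           vdev_id, wlan_op_mode_sta);
 *   }
 */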
struct cdp_ctrl_ops {
	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
			struct cdp_pdev *pdev,
			uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
			struct cdp_pdev *pdev,
			uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * sending and receiving packets. It works like open AUTH mode; HW
	 * will create all packets as non-encrypted frames because no key is
	 * installed. For rx fragmented frames, it bypasses all the rx
	 * defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */
	void
		(*txrx_set_safemode)(
			struct cdp_vdev *vdev,
			u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When this flag is set, all the unencrypted frames
	 * received over a secure connection will be discarded.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
			struct cdp_vdev *vdev,
			u_int32_t val);
	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
			struct cdp_vdev *vdev,
			enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
			struct cdp_vdev *vdev,
			enum htt_cmn_pkt_type val);
	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within the rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize the peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
			u_int32_t authorize);
	bool
		(*txrx_set_inact_params)(struct cdp_pdev *pdev,
			u_int16_t inact_check_interval,
			u_int16_t inact_normal,
			u_int16_t inact_overload);
	bool
		(*txrx_start_inact_timer)(
			struct cdp_pdev *pdev,
			bool enable);
	/**
	 * @brief Set the overload status of the radio
	 * @details
	 * Set the overload status of the radio, updating the inactivity
	 * threshold and inactivity count for each node.
	 *
	 * @param pdev - the data physical device object
	 * @param overload - whether the radio is overloaded or not
	 */
	void (*txrx_set_overload)(
		struct cdp_pdev *pdev,
		bool overload);
	/**
	 * @brief Check the inactivity status of the peer/node
	 *
	 * @param peer - pointer to the node's object
	 * @return true if the node is inactive; otherwise return false
	 */
	bool
		(*txrx_peer_is_inact)(void *peer);
	/**
	 * @brief Mark inactivity status of the peer/node
	 * @details
	 * If it becomes active, reset inactivity count to reload value;
	 * if the inactivity status changed, notify umac band steering.
	 *
	 * @param peer - pointer to the node's object
	 * @param inactive - whether the node is inactive or not
	 */
	void (*txrx_mark_peer_inact)(
		void *peer,
		bool inactive);
	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
	/**
	 * @brief setting mesh rx filter
	 * @details
	 * Packets are dropped based on the bits enabled in the filter.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
			enum cdp_vdev_param_type param, uint32_t val);
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
		struct cdp_pdev *pdev,
		enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
		struct cdp_pdev *pdev);
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
			uint8_t subtype, uint8_t tx_power);
	void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
			enum cdp_pdev_param_type type, uint8_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num);
#endif
};
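/*
 * Illustrative sketch (not part of this header): a control-path caller
 * would typically reach these ops via soc->ops->ctrl_ops (see struct
 * cdp_ops below). The helper name is an assumption for the example only,
 * and htt_cmn_pkt_type_raw is assumed to be in scope from the HTT common
 * headers.
 *
 *   static inline void
 *   example_vdev_set_raw_decap(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
 *   {
 *           // Ask the DP layer to deliver all Rx frames of this vdev in
 *           // raw (802.11) format.
 *           soc->ops->ctrl_ops->txrx_set_vdev_rx_decap_type(vdev,
 *                           htt_cmn_pkt_type_raw);
 *   }
 */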
struct cdp_me_ops {
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);
	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
			uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify multicast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frames to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device that received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
			u_int8_t vdev_id, qdf_nbuf_t msdu);
};
struct cdp_mon_ops {
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
	/* HK advance monitor filter support */
	int (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};
struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req);
	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
			void *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param msg_len - length of the stats message
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
			uint32_t *stats_base, uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);
	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);
	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);
	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req);
	void
		(*print_lro_stats)(struct cdp_vdev *vdev);
	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);
	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
			uint32_t cap);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
			uint32_t data_len);
};
struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
			u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
			int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};

struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
			qdf_nbuf_t *pnbuf,
			struct cdp_raw_ast *raw_ast);
};
#ifdef CONFIG_WIN
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
			enum _ol_ath_param_t, uint32_t, void *);
};
#endif /* CONFIG_WIN */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11
/**
 * struct cdp_lro_hash_config - set LRO init parameters
 * @lro_enable: indicates whether LRO is enabled
 * @tcp_flag: if the TCP flags from the packet do not match
 * the values in this field after masking with the TCP flags mask
 * below, the packet is not LRO eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains the seed needed to compute the flow id
 * 5-tuple toeplitz hash for IPv4 packets
 * @toeplitz_hash_ipv6: contains the seed needed to compute the flow id
 * 5-tuple toeplitz hash for IPv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		 tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
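/*
 * Illustrative sketch (not part of this header): how a control-path
 * implementation might fill this structure before handing it to the
 * lro_hash_config callback declared in struct ol_if_ops below. The caller
 * name and the example_random_seed() helper are assumptions for the
 * example only, as are the placeholder flag/mask values.
 *
 *   void example_send_lro_hash_config(void *scn_handle, struct ol_if_ops *ops)
 *   {
 *           struct cdp_lro_hash_config cfg = {0};
 *           int i;
 *
 *           cfg.lro_enable = 1;
 *           cfg.tcp_flag = 0;            // placeholder flag value
 *           cfg.tcp_flag_mask = 0x1ff;   // placeholder 9-bit mask
 *           for (i = 0; i < LRO_IPV4_SEED_ARR_SZ; i++)
 *                   cfg.toeplitz_hash_ipv4[i] = example_random_seed();
 *           for (i = 0; i < LRO_IPV6_SEED_ARR_SZ; i++)
 *                   cfg.toeplitz_hash_ipv6[i] = example_random_seed();
 *           ops->lro_hash_config(scn_handle, &cfg);
 *   }
 */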
struct ol_if_ops {
	void (*peer_set_default_routing)(void *scn_handle,
			uint8_t *peer_macaddr, uint8_t vdev_id,
			bool hash_based, uint8_t ring_num);
	int (*peer_rx_reorder_queue_setup)(void *scn_handle,
			uint8_t vdev_id, uint8_t *peer_mac,
			qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num);
	int (*peer_rx_reorder_queue_remove)(void *scn_handle,
			uint8_t vdev_id, uint8_t *peer_macaddr,
			uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
			uint8_t *peer_macaddr);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *ol_soc_handle,
			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
			uint8_t *wds_macaddr);
	QDF_STATUS (*lro_hash_config)(void *scn_handle,
			struct cdp_lro_hash_config *lro_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
			uint8_t type);
	uint8_t (*rx_invalid_peer)(void *osif_pdev, void *msg);
	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id,
			uint16_t hw_peer_id, uint8_t vdev_id,
			uint8_t *peer_mac_addr,
			enum cdp_txrx_ast_entry_type peer_type);
	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);
	int (*get_dp_cfg_param)(void *ol_soc_handle,
			enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(void *ol_soc_handle,
			uint16_t vdev_id, void *wh);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);
	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
			u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
		char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
		u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid);
#endif
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
			uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
			int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
			int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
			uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
			uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
			uint16_t *out_packet_count,
			uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
			uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled:
 * @set_cfg_packet_log_enabled:
 * @cfg_attach:
 * @vdev_rx_set_intrabss_fwd:
 * @get_opmode:
 * @is_rx_fwd_disabled:
 * @tx_set_is_mgmt_over_wmi_enabled:
 * @is_high_latency:
 * @set_flow_control_parameters:
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
			uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
			uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
			void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
};
/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
			tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(void *);
};
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
	int (*register_tx_flow_control)(uint8_t vdev_id,
			ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
			ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
			unsigned int low_watermark,
			unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};
#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
			bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
			void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
			void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
			uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
			uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
			uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
			uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
			bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
			uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
			uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
			qdf_ipa_client_type_t prod_client,
			qdf_ipa_client_type_t cons_client,
			uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
			uint32_t max_supported_bw_mbps);
};
#endif
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
			struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
		(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};
/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @cfg_attach:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @last_assoc_received:
 * @last_disassoc_received:
 * @last_deauth_received:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
			struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
			enum ol_txrx_peer_state sta_state,
			bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
			u8 *peer_addr, uint8_t *peer_id,
			enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
			uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
			uint8_t *peer_addr,
			enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
			uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
			int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev, uint8_t *peer_id);
	qdf_time_t * (*last_assoc_received)(void *peer);
	qdf_time_t * (*last_disassoc_received)(void *peer);
	qdf_time_t * (*last_deauth_received)(void *peer);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
			uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
};
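/*
 * Illustrative sketch (not part of this header): peer_get_ref_by_addr and
 * peer_release_ref are intended to be used as a get/put pair so the peer
 * cannot be freed while it is in use. The helper name and the debug-id
 * enumerator chosen here are assumptions for the example only.
 *
 *   void example_use_peer(struct cdp_peer_ops *ops, struct cdp_pdev *pdev,
 *                         u8 *mac)
 *   {
 *           uint8_t peer_id;
 *           void *peer;
 *
 *           peer = ops->peer_get_ref_by_addr(pdev, mac, &peer_id,
 *                                            PEER_DEBUG_ID_OL_INTERNAL);
 *           if (!peer)
 *                   return;
 *           // ... use the peer while holding the reference ...
 *           ops->peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *   }
 */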
/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
			uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};
/**
 * struct cdp_mob_stats_ops - mcl mobile stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* CONFIG_WIN */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifndef CONFIG_WIN
	struct cdp_misc_ops *misc_ops;
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
	struct cdp_bus_ops *bus_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_throttle_ops *throttle_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_tx_delay_ops *delay_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
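/*
 * Illustrative sketch (not part of this header): the usual calling
 * convention for this table is a thin static-inline wrapper that
 * null-checks the registered ops before dispatching. The wrapper name is
 * an assumption for the example only; the actual cdp_* wrappers are
 * defined in the companion cdp_txrx_*.h headers.
 *
 *   static inline int
 *   example_cdp_get_tx_pending(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
 *   {
 *           if (!soc || !soc->ops || !soc->ops->cmn_drv_ops ||
 *               !soc->ops->cmn_drv_ops->txrx_get_tx_pending)
 *                   return 0;
 *           return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
 *   }
 */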
#endif /* _CDP_TXRX_CMN_OPS_H_ */