cdp_txrx_ops.h 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867
/*
 * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#ifdef CONFIG_WIN
#include <cdp_txrx_stats_struct.h>
#endif
#include "cdp_txrx_handle.h"
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
  36. struct cdp_cmn_ops {
  37. int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
  38. int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
  39. struct cdp_vdev *(*txrx_vdev_attach)
  40. (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
  41. uint8_t vdev_id, enum wlan_op_mode op_mode);
  42. void (*txrx_vdev_detach)
  43. (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
  44. void *cb_context);
  45. struct cdp_pdev *(*txrx_pdev_attach)
  46. (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
  47. HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
  48. int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
  49. void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
  50. void *(*txrx_peer_create)
  51. (struct cdp_vdev *vdev, uint8_t *peer_mac_addr);
  52. void (*txrx_peer_setup)
  53. (struct cdp_vdev *vdev_hdl, void *peer_hdl);
  54. void (*txrx_peer_teardown)
  55. (struct cdp_vdev *vdev_hdl, void *peer_hdl);
  56. void (*txrx_peer_delete)(void *peer);
  57. int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev);
  58. void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
  59. void (*txrx_set_privacy_filters)
  60. (struct cdp_vdev *vdev, void *filter, uint32_t num);
  61. /********************************************************************
  62. * Data Interface (B Interface)
  63. ********************************************************************/
  64. void (*txrx_vdev_register)(struct cdp_vdev *vdev,
  65. void *osif_vdev, struct ol_txrx_ops *txrx_ops);
  66. int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
  67. qdf_nbuf_t tx_mgmt_frm, uint8_t type);
  68. int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
  69. qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
  70. uint16_t chanfreq);
  71. /**
  72. * ol_txrx_mgmt_tx_cb - tx management delivery notification
  73. * callback function
  74. */
  75. void (*txrx_mgmt_tx_cb_set)
  76. (struct cdp_pdev *pdev, uint8_t type,
  77. ol_txrx_mgmt_tx_cb download_cb, ol_txrx_mgmt_tx_cb ota_ack_cb,
  78. void *ctxt);
  79. int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
  80. /**
  81. * ol_txrx_data_tx_cb - Function registered with the data path
  82. * that is called when tx frames marked as "no free" are
  83. * done being transmitted
  84. */
  85. void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
  86. ol_txrx_data_tx_cb callback, void *ctxt);
  87. /*******************************************************************
  88. * Statistics and Debugging Interface (C Inteface)
  89. ********************************************************************/
  90. int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
  91. int max_subfrms_amsdu);
  92. A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
  93. struct ol_txrx_stats_req *req,
  94. bool per_vdev, bool response_expected);
  95. int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
  96. void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
  97. uint8_t cfg_stats_type, uint32_t cfg_val);
  98. void (*txrx_print_level_set)(unsigned level);
  99. /**
  100. * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
  101. * @vdev: vdev handle
  102. *
  103. * Return: vdev mac address
  104. */
  105. uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
  106. /**
  107. * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
  108. * vdev
  109. * @vdev: vdev handle
  110. *
  111. * Return: Handle to struct qdf_mac_addr
  112. */
  113. struct qdf_mac_addr *
  114. (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
  115. /**
  116. * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
  117. * @vdev: vdev handle
  118. *
  119. * Return: Handle to pdev
  120. */
  121. struct cdp_pdev *(*txrx_get_pdev_from_vdev)
  122. (struct cdp_vdev *vdev);
  123. /**
  124. * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
  125. * @vdev: vdev handle
  126. *
  127. * Return: Handle to control pdev
  128. */
  129. struct cdp_cfg *
  130. (*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
  131. struct cdp_vdev *
  132. (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
  133. uint8_t vdev_id);
  134. void (*txrx_soc_detach)(void *soc);
  135. int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
  136. uint16_t tid, uint16_t batimeout, uint16_t buffersize,
  137. uint16_t startseqnum);
  138. void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
  139. uint8_t *dialogtoken, uint16_t *statuscode,
  140. uint16_t *buffersize, uint16_t *batimeout);
  141. int (*delba_process)(void *peer_handle,
  142. int tid, uint16_t reasoncode);
  143. uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
  144. uint16_t peer_id, uint8_t *mac_addr);
  145. };
  146. struct cdp_ctrl_ops {
  147. int
  148. (*txrx_mempools_attach)(void *ctrl_pdev);
  149. int
  150. (*txrx_set_filter_neighbour_peers)(
  151. struct cdp_pdev *pdev,
  152. u_int32_t val);
  153. /**
  154. * @brief set the safemode of the device
  155. * @details
  156. * This flag is used to bypass the encrypt and decrypt processes when
  157. * send and receive packets. It works like open AUTH mode, HW will
  158. * ctreate all packets as non-encrypt frames because no key installed.
  159. * For rx fragmented frames,it bypasses all the rx defragmentaion.
  160. *
  161. * @param vdev - the data virtual device object
  162. * @param val - the safemode state
  163. * @return - void
  164. */
  165. void
  166. (*txrx_set_safemode)(
  167. struct cdp_vdev *vdev,
  168. u_int32_t val);
  169. /**
  170. * @brief configure the drop unencrypted frame flag
  171. * @details
  172. * Rx related. When set this flag, all the unencrypted frames
  173. * received over a secure connection will be discarded
  174. *
  175. * @param vdev - the data virtual device object
  176. * @param val - flag
  177. * @return - void
  178. */
  179. void
  180. (*txrx_set_drop_unenc)(
  181. struct cdp_vdev *vdev,
  182. u_int32_t val);
  183. /**
  184. * @brief set the Tx encapsulation type of the VDEV
  185. * @details
  186. * This will be used to populate the HTT desc packet type field
  187. * during Tx
  188. * @param vdev - the data virtual device object
  189. * @param val - the Tx encap type
  190. * @return - void
  191. */
  192. void
  193. (*txrx_set_tx_encap_type)(
  194. struct cdp_vdev *vdev,
  195. enum htt_cmn_pkt_type val);
  196. /**
  197. * @brief set the Rx decapsulation type of the VDEV
  198. * @details
  199. * This will be used to configure into firmware and hardware
  200. * which format to decap all Rx packets into, for all peers under
  201. * the VDEV.
  202. * @param vdev - the data virtual device object
  203. * @param val - the Rx decap mode
  204. * @return - void
  205. */
  206. void
  207. (*txrx_set_vdev_rx_decap_type)(
  208. struct cdp_vdev *vdev,
  209. enum htt_cmn_pkt_type val);
  210. /**
  211. * @brief get the Rx decapsulation type of the VDEV
  212. *
  213. * @param vdev - the data virtual device object
  214. * @return - the Rx decap type
  215. */
  216. enum htt_cmn_pkt_type
  217. (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
  218. /* Is this similar to ol_txrx_peer_state_update() in MCL */
  219. /**
  220. * @brief Update the authorize peer object at association time
  221. * @details
  222. * For the host-based implementation of rate-control, it
  223. * updates the peer/node-related parameters within rate-control
  224. * context of the peer at association.
  225. *
  226. * @param peer - pointer to the node's object
  227. * @authorize - either to authorize or unauthorize peer
  228. *
  229. * @return none
  230. */
  231. void
  232. (*txrx_peer_authorize)(void *peer,
  233. u_int32_t authorize);
  234. bool
  235. (*txrx_set_inact_params)(struct cdp_pdev *pdev,
  236. u_int16_t inact_check_interval,
  237. u_int16_t inact_normal,
  238. u_int16_t inact_overload);
  239. bool
  240. (*txrx_start_inact_timer)(
  241. struct cdp_pdev *pdev,
  242. bool enable);
  243. /**
  244. * @brief Set the overload status of the radio
  245. * @details
  246. * Set the overload status of the radio, updating the inactivity
  247. * threshold and inactivity count for each node.
  248. *
  249. * @param pdev - the data physical device object
  250. * @param overload - whether the radio is overloaded or not
  251. */
  252. void (*txrx_set_overload)(
  253. struct cdp_pdev *pdev,
  254. bool overload);
  255. /**
  256. * @brief Check the inactivity status of the peer/node
  257. *
  258. * @param peer - pointer to the node's object
  259. * @return true if the node is inactive; otherwise return false
  260. */
  261. bool
  262. (*txrx_peer_is_inact)(void *peer);
  263. /**
  264. * @brief Mark inactivity status of the peer/node
  265. * @details
  266. * If it becomes active, reset inactivity count to reload value;
  267. * if the inactivity status changed, notify umac band steering.
  268. *
  269. * @param peer - pointer to the node's object
  270. * @param inactive - whether the node is inactive or not
  271. */
  272. void (*txrx_mark_peer_inact)(
  273. void *peer,
  274. bool inactive);
  275. /* Should be ol_txrx_ctrl_api.h */
  276. void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
  277. /**
  278. * @brief setting mesh rx filter
  279. * @details
  280. * based on the bits enabled in the filter packets has to be dropped.
  281. *
  282. * @param vdev - the data virtual device object
  283. * @param val - value to set
  284. */
  285. void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
  286. void (*tx_flush_buffers)(struct cdp_vdev *vdev);
  287. int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
  288. };
  289. struct cdp_me_ops {
  290. u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
  291. (struct cdp_pdev *pdev, u_int16_t buf_count);
  292. u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
  293. struct cdp_pdev *pdev,
  294. u_int16_t buf_count);
  295. u_int16_t
  296. (*tx_get_mcast_buf_allocated_marked)
  297. (struct cdp_pdev *pdev);
  298. void
  299. (*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
  300. void
  301. (*tx_me_free_descriptor)(struct cdp_pdev *pdev);
  302. uint16_t
  303. (*tx_me_convert_ucast)(struct cdp_vdev *vdev,
  304. qdf_nbuf_t wbuf, u_int8_t newmac[][6],
  305. uint8_t newmaccnt);
  306. /* Should be a function pointer in ol_txrx_osif_ops{} */
  307. /**
  308. * @brief notify mcast frame indication from FW.
  309. * @details
  310. * This notification will be used to convert
  311. * multicast frame to unicast.
  312. *
  313. * @param pdev - handle to the ctrl SW's physical device object
  314. * @param vdev_id - ID of the virtual device received the special data
  315. * @param msdu - the multicast msdu returned by FW for host inspect
  316. */
  317. int (*mcast_notify)(struct cdp_pdev *pdev,
  318. u_int8_t vdev_id, qdf_nbuf_t msdu);
  319. };
  320. struct cdp_mon_ops {
  321. void (*txrx_monitor_set_filter_ucast_data)
  322. (struct cdp_pdev *, u_int8_t val);
  323. void (*txrx_monitor_set_filter_mcast_data)
  324. (struct cdp_pdev *, u_int8_t val);
  325. void (*txrx_monitor_set_filter_non_data)
  326. (struct cdp_pdev *, u_int8_t val);
  327. u_int8_t (*txrx_monitor_get_filter_ucast_data)
  328. (struct cdp_vdev *vdev_txrx_handle);
  329. u_int8_t (*txrx_monitor_get_filter_mcast_data)
  330. (struct cdp_vdev *vdev_txrx_handle);
  331. u_int8_t (*txrx_monitor_get_filter_non_data)
  332. (struct cdp_vdev *vdev_txrx_handle);
  333. int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
  334. };
  335. struct cdp_host_stats_ops {
  336. int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
  337. struct ol_txrx_stats_req *req,
  338. enum cdp_host_txrx_stats type);
  339. void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
  340. int (*txrx_stats_publish)(struct cdp_pdev *pdev,
  341. void *buf);
  342. /**
  343. * @brief Enable enhanced stats functionality.
  344. *
  345. * @param pdev - the physical device object
  346. * @return - void
  347. */
  348. void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
  349. /**
  350. * @brief Disable enhanced stats functionality.
  351. *
  352. * @param pdev - the physical device object
  353. * @return - void
  354. */
  355. void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
  356. /**
  357. * @brief Get the desired stats from the message.
  358. *
  359. * @param pdev - the physical device object
  360. * @param stats_base - stats buffer recieved from FW
  361. * @param type - stats type.
  362. * @return - pointer to requested stat identified by type
  363. */
  364. uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
  365. uint32_t *stats_base, uint32_t msg_len, uint8_t type);
  366. void
  367. (*tx_print_tso_stats)(struct cdp_vdev *vdev);
  368. void
  369. (*tx_rst_tso_stats)(struct cdp_vdev *vdev);
  370. void
  371. (*tx_print_sg_stats)(struct cdp_vdev *vdev);
  372. void
  373. (*tx_rst_sg_stats)(struct cdp_vdev *vdev);
  374. void
  375. (*print_rx_cksum_stats)(struct cdp_vdev *vdev);
  376. void
  377. (*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
  378. A_STATUS
  379. (*txrx_host_me_stats)(struct cdp_vdev *vdev);
  380. void
  381. (*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
  382. int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
  383. struct ol_txrx_stats_req *req);
  384. void
  385. (*print_lro_stats)(struct cdp_vdev *vdev);
  386. void
  387. (*reset_lro_stats)(struct cdp_vdev *vdev);
  388. };
  389. struct cdp_wds_ops {
  390. void
  391. (*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
  392. u_int32_t val);
  393. };
  394. struct cdp_raw_ops {
  395. int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
  396. void (*rsim_get_astentry)(struct cdp_vdev *vdev,
  397. qdf_nbuf_t *pnbuf,
  398. struct cdp_raw_ast *raw_ast);
  399. };
  400. #ifdef CONFIG_WIN
  401. struct cdp_pflow_ops {
  402. uint32_t(*pflow_update_pdev_params)(void *,
  403. ol_ath_param_t, uint32_t, void *);
  404. };
  405. #endif /* CONFIG_WIN */
  406. #define LRO_IPV4_SEED_ARR_SZ 5
  407. #define LRO_IPV6_SEED_ARR_SZ 11
  408. /**
  409. * struct cdp_lro_config - set LRO init parameters
  410. * @lro_enable: indicates whether lro is enabled
  411. * @tcp_flag: If the TCP flags from the packet do not match
  412. * the values in this field after masking with TCP flags mask
  413. * below, packet is not LRO eligible
  414. * @tcp_flag_mask: field for comparing the TCP values provided
  415. * above with the TCP flags field in the received packet
  416. * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
  417. * 5-tuple toeplitz hash for ipv4 packets
  418. * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
  419. * 5-tuple toeplitz hash for ipv6 packets
  420. */
  421. struct cdp_lro_hash_config {
  422. uint32_t lro_enable;
  423. uint32_t tcp_flag:9,
  424. tcp_flag_mask:9;
  425. uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
  426. uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
  427. };
  428. struct ol_if_ops {
  429. void (*peer_set_default_routing)(void *scn_handle,
  430. uint8_t *peer_macaddr, uint8_t vdev_id,
  431. bool hash_based, uint8_t ring_num);
  432. int (*peer_rx_reorder_queue_setup)(void *scn_handle,
  433. uint8_t vdev_id, uint8_t *peer_mac,
  434. qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num);
  435. int (*peer_rx_reorder_queue_remove)(void *scn_handle,
  436. uint8_t vdev_id, uint8_t *peer_macaddr,
  437. uint32_t tid_mask);
  438. int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
  439. uint8_t *peer_macaddr);
  440. bool (*is_hw_dbs_2x2_capable)(void);
  441. int (*peer_add_wds_entry)(void *ol_soc_handle,
  442. const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
  443. uint32_t flags);
  444. int (*peer_update_wds_entry)(void *ol_soc_handle,
  445. uint8_t *dest_macaddr, uint8_t *peer_macaddr,
  446. uint32_t flags);
  447. void (*peer_del_wds_entry)(void *ol_soc_handle,
  448. uint8_t *wds_macaddr);
  449. QDF_STATUS (*lro_hash_config)(void *scn_handle,
  450. struct cdp_lro_hash_config *lro_hash);
  451. void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
  452. uint8_t type);
  453. /* TODO: Add any other control path calls required to OL_IF/WMA layer */
  454. };
#ifndef CONFIG_WIN
/* From here on: MCL-specific OPs */
  457. /**
  458. * struct cdp_misc_ops - mcl ops not classified
  459. * @set_ibss_vdev_heart_beat_timer:
  460. * @bad_peer_txctl_set_setting:
  461. * @bad_peer_txctl_update_threshold:
  462. * @hl_tdls_flag_reset:
  463. * @tx_non_std:
  464. * @get_vdev_id:
  465. * @set_wisa_mode:
  466. * @runtime_suspend:
  467. * @runtime_resume:
  468. */
  469. struct cdp_misc_ops {
  470. uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
  471. uint16_t timer_value_sec);
  472. void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
  473. struct ol_tx_wmm_param_t wmm_param);
  474. void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
  475. int period, int txq_limit);
  476. void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
  477. int level, int tput_thresh, int tx_limit);
  478. void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
  479. qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
  480. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
  481. uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
  482. QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
  483. QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
  484. QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
  485. int (*get_opmode)(struct cdp_vdev *vdev);
  486. void (*mark_first_wakeup_packet)(uint8_t value);
  487. void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
  488. void (*flush_rx_frames)(void *peer, bool drop);
  489. A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
  490. uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
  491. void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
  492. void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
  493. };
  494. /**
  495. * struct cdp_tx_delay_ops - mcl tx delay ops
  496. * @tx_delay:
  497. * @tx_delay_hist:
  498. * @tx_packet_count:
  499. * @tx_set_compute_interval:
  500. */
  501. struct cdp_tx_delay_ops {
  502. void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
  503. uint32_t *tx_delay_microsec, int category);
  504. void (*tx_delay_hist)(struct cdp_pdev *pdev,
  505. uint16_t *bin_values, int category);
  506. void (*tx_packet_count)(struct cdp_pdev *pdev,
  507. uint16_t *out_packet_count,
  508. uint16_t *out_packet_loss_count, int category);
  509. void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
  510. uint32_t interval);
  511. };
  512. /**
  513. * struct cdp_pmf_ops - mcl protected management frame ops
  514. * @get_pn_info:
  515. */
  516. struct cdp_pmf_ops {
  517. void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
  518. uint64_t **last_pn, uint32_t **rmf_pn_replays);
  519. };
  520. /**
  521. * struct cdp_cfg_ops - mcl configuration ops
  522. * @set_cfg_rx_fwd_disabled:
  523. * @set_cfg_packet_log_enabled:
  524. * @cfg_attach:
  525. * @vdev_rx_set_intrabss_fwd:
  526. * @get_opmode:
  527. * @is_rx_fwd_disabled:
  528. * @tx_set_is_mgmt_over_wmi_enabled:
  529. * @is_high_latency:
  530. * @set_flow_control_parameters:
  531. */
  532. struct cdp_cfg_ops {
  533. void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
  534. uint8_t disable_rx_fwd);
  535. void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
  536. uint8_t val);
  537. struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
  538. void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
  539. uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
  540. void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
  541. int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
  542. void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
  543. void *param);
  544. void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
  545. };
  546. /**
  547. * struct cdp_flowctl_ops - mcl flow control
  548. * @register_pause_cb:
  549. * @set_desc_global_pool_size:
  550. * @dump_flow_pool_info:
  551. */
  552. struct cdp_flowctl_ops {
  553. QDF_STATUS (*register_pause_cb)(ol_tx_pause_callback_fp);
  554. void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
  555. void (*dump_flow_pool_info)(void);
  556. };
  557. /**
  558. * struct cdp_lflowctl_ops - mcl legacy flow control ops
  559. * @register_tx_flow_control:
  560. * @deregister_tx_flow_control_cb:
  561. * @flow_control_cb:
  562. * @get_tx_resource:
  563. * @ll_set_tx_pause_q_depth:
  564. * @vdev_flush:
  565. * @vdev_pause:
  566. * @vdev_unpause:
  567. */
  568. struct cdp_lflowctl_ops {
  569. int (*register_tx_flow_control)(uint8_t vdev_id,
  570. ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx);
  571. int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
  572. void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
  573. bool (*get_tx_resource)(uint8_t sta_id,
  574. unsigned int low_watermark,
  575. unsigned int high_watermark_offset);
  576. int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
  577. void (*vdev_flush)(struct cdp_vdev *vdev);
  578. void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
  579. void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
  580. };
  581. /**
  582. * struct cdp_ipa_ops - mcl ipa data path ops
  583. * @ipa_get_resource:
  584. * @ipa_set_doorbell_paddr:
  585. * @ipa_set_active:
  586. * @ipa_op_response:
  587. * @ipa_register_op_cb:
  588. * @ipa_get_stat:
  589. * @ipa_tx_data_frame:
  590. */
  591. struct cdp_ipa_ops {
  592. void (*ipa_get_resource)(struct cdp_pdev *pdev,
  593. struct ol_txrx_ipa_resources *ipa_res);
  594. void (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev,
  595. qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
  596. qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
  597. void (*ipa_set_active)(struct cdp_pdev *pdev,
  598. bool uc_active, bool is_tx);
  599. void (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
  600. void (*ipa_register_op_cb)(struct cdp_pdev *pdev,
  601. void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
  602. void *osif_dev);
  603. void (*ipa_get_stat)(struct cdp_pdev *pdev);
  604. qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
  605. void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *cfg_pdev,
  606. uint32_t value);
  607. };
  608. /**
  609. * struct cdp_lro_ops - mcl large receive offload ops
  610. * @register_lro_flush_cb:
  611. * @deregister_lro_flush_cb:
  612. */
  613. struct cdp_lro_ops {
  614. void (*register_lro_flush_cb)(void (lro_flush_cb)(void *),
  615. void *(lro_init_cb)(void));
  616. void (*deregister_lro_flush_cb)(void (lro_deinit_cb)(void *));
  617. };
  618. /**
  619. * struct cdp_bus_ops - mcl bus suspend/resume ops
  620. * @bus_suspend:
  621. * @bus_resume:
  622. */
  623. struct cdp_bus_ops {
  624. QDF_STATUS (*bus_suspend)(void);
  625. QDF_STATUS (*bus_resume)(void);
  626. };
  627. /**
  628. * struct cdp_ocb_ops - mcl ocb ops
  629. * @set_ocb_chan_info:
  630. * @get_ocb_chan_info:
  631. */
  632. struct cdp_ocb_ops {
  633. void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
  634. struct ol_txrx_ocb_set_chan ocb_set_chan);
  635. struct ol_txrx_ocb_chan_info *
  636. (*get_ocb_chan_info)(struct cdp_vdev *vdev);
  637. };
  638. /**
  639. * struct cdp_peer_ops - mcl peer related ops
  640. * @register_peer:
  641. * @clear_peer:
  642. * @cfg_attach:
  643. * @find_peer_by_addr:
  644. * @find_peer_by_addr_and_vdev:
  645. * @local_peer_id:
  646. * @peer_find_by_local_id:
  647. * @peer_state_update:
  648. * @get_vdevid:
  649. * @get_vdev_by_sta_id:
  650. * @register_ocb_peer:
  651. * @peer_get_peer_mac_addr:
  652. * @get_peer_state:
  653. * @get_vdev_for_peer:
  654. * @update_ibss_add_peer_num_of_vdev:
  655. * @remove_peers_for_vdev:
  656. * @remove_peers_for_vdev_no_lock:
  657. * @copy_mac_addr_raw:
  658. * @add_last_real_peer:
  659. * @last_assoc_received:
  660. * @last_disassoc_received:
  661. * @last_deauth_received:
  662. * @is_vdev_restore_last_peer:
  663. * @update_last_real_peer:
  664. */
  665. struct cdp_peer_ops {
  666. QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
  667. struct ol_txrx_desc_type *sta_desc);
  668. QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
  669. QDF_STATUS (*change_peer_state)(uint8_t sta_id,
  670. enum ol_txrx_peer_state sta_state,
  671. bool roam_synch_in_progress);
  672. void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
  673. uint8_t *peer_addr, uint8_t *peer_id);
  674. void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
  675. struct cdp_vdev *vdev,
  676. uint8_t *peer_addr, uint8_t *peer_id);
  677. uint16_t (*local_peer_id)(void *peer);
  678. void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
  679. uint8_t local_peer_id);
  680. QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
  681. uint8_t *peer_addr,
  682. enum ol_txrx_peer_state state);
  683. QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
  684. struct cdp_vdev * (*get_vdev_by_sta_id)(uint8_t sta_id);
  685. QDF_STATUS (*register_ocb_peer)(void *cds_ctx, uint8_t *mac_addr,
  686. uint8_t *peer_id);
  687. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  688. int (*get_peer_state)(void *peer);
  689. struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
  690. int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
  691. int16_t peer_num_delta);
  692. void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
  693. ol_txrx_vdev_peer_remove_cb callback,
  694. void *callback_context, bool remove_last_peer);
  695. void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
  696. ol_txrx_vdev_peer_remove_cb callback,
  697. void *callback_context);
  698. void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
  699. void (*add_last_real_peer)(struct cdp_pdev *pdev,
  700. struct cdp_vdev *vdev, uint8_t *peer_id);
  701. qdf_time_t * (*last_assoc_received)(void *peer);
  702. qdf_time_t * (*last_disassoc_received)(void *peer);
  703. qdf_time_t * (*last_deauth_received)(void *peer);
  704. bool (*is_vdev_restore_last_peer)(void *peer);
  705. void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
  706. uint8_t *peer_id, bool restore_last_peer);
  707. void (*peer_detach_force_delete)(void *peer);
  708. };
  709. /**
  710. * struct cdp_ocb_ops - mcl ocb ops
  711. * @throttle_init_period:
  712. * @throttle_set_level:
  713. */
  714. struct cdp_throttle_ops {
  715. void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
  716. uint8_t *dutycycle_level);
  717. void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
  718. };
  719. /**
  720. * struct cdp_ocb_ops - mcl ocb ops
  721. * @display_stats:
  722. * @clear_stats:
  723. * @stats:
  724. */
  725. struct cdp_mob_stats_ops {
  726. void (*display_stats)(uint16_t bitmap);
  727. void (*clear_stats)(uint16_t bitmap);
  728. int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
  729. };
#endif /* !CONFIG_WIN */
  731. struct cdp_ops {
  732. struct cdp_cmn_ops *cmn_drv_ops;
  733. struct cdp_ctrl_ops *ctrl_ops;
  734. struct cdp_me_ops *me_ops;
  735. struct cdp_mon_ops *mon_ops;
  736. struct cdp_host_stats_ops *host_stats_ops;
  737. struct cdp_wds_ops *wds_ops;
  738. struct cdp_raw_ops *raw_ops;
  739. struct cdp_pflow_ops *pflow_ops;
  740. #ifndef CONFIG_WIN
  741. struct cdp_misc_ops *misc_ops;
  742. struct cdp_cfg_ops *cfg_ops;
  743. struct cdp_flowctl_ops *flowctl_ops;
  744. struct cdp_lflowctl_ops *l_flowctl_ops;
  745. struct cdp_ipa_ops *ipa_ops;
  746. struct cdp_lro_ops *lro_ops;
  747. struct cdp_bus_ops *bus_ops;
  748. struct cdp_ocb_ops *ocb_ops;
  749. struct cdp_peer_ops *peer_ops;
  750. struct cdp_throttle_ops *throttle_ops;
  751. struct cdp_mob_stats_ops *mob_stats_ops;
  752. struct cdp_tx_delay_ops *delay_ops;
  753. struct cdp_pmf_ops *pmf_ops;
  754. #endif /* CONFIG_WIN */
  755. };
#endif /* _CDP_TXRX_CMN_OPS_H_ */