/*
 * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
  24. #ifndef _CDP_TXRX_CMN_OPS_H_
  25. #define _CDP_TXRX_CMN_OPS_H_
  26. #include <cdp_txrx_cmn_struct.h>
  27. #ifdef CONFIG_WIN
  28. #include <cdp_txrx_stats_struct.h>
  29. #endif
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
  35. struct cdp_cmn_ops {
  36. int(*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
  37. int(*txrx_pdev_attach_target)(void *pdev);
  38. void *(*txrx_vdev_attach)
  39. (void *pdev, uint8_t *vdev_mac_addr,
  40. uint8_t vdev_id, enum wlan_op_mode op_mode);
  41. void(*txrx_vdev_detach)
  42. (void *vdev, ol_txrx_vdev_delete_cb callback,
  43. void *cb_context);
  44. void *(*txrx_pdev_attach)
  45. (ol_txrx_soc_handle soc, void *ctrl_pdev,
  46. HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
  47. int (*txrx_pdev_post_attach)(void *pdev);
  48. void(*txrx_pdev_detach)(void *pdev, int force);
  49. void *(*txrx_peer_create)
  50. (void *vdev, uint8_t *peer_mac_addr);
  51. void (*txrx_peer_setup)
  52. (void *vdev_hdl, void *peer_hdl);
  53. void (*txrx_peer_teardown)
  54. (void *vdev_hdl, void *peer_hdl);
  55. void(*txrx_peer_delete)(void *peer);
  56. int(*txrx_set_monitor_mode)(void *vdev);
  57. void(*txrx_set_curchan)(void *pdev, uint32_t chan_mhz);
  58. void (*txrx_set_privacy_filters)
  59. (void *vdev, void *filter, uint32_t num);
  60. /********************************************************************
  61. * Data Interface (B Interface)
  62. ********************************************************************/
  63. void(*txrx_vdev_register)(void *vdev,
  64. void *osif_vdev, struct ol_txrx_ops *txrx_ops);
  65. int(*txrx_mgmt_send)(void *vdev,
  66. qdf_nbuf_t tx_mgmt_frm, uint8_t type);
  67. int(*txrx_mgmt_send_ext)(void *vdev,
  68. qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
  69. uint16_t chanfreq);
  70. /**
  71. * ol_txrx_mgmt_tx_cb - tx management delivery notification
  72. * callback function
  73. */
  74. void(*txrx_mgmt_tx_cb_set)
  75. (void *pdev, uint8_t type,
  76. ol_txrx_mgmt_tx_cb download_cb, ol_txrx_mgmt_tx_cb ota_ack_cb,
  77. void *ctxt);
  78. int (*txrx_get_tx_pending)(void *pdev);
  79. /**
  80. * ol_txrx_data_tx_cb - Function registered with the data path
  81. * that is called when tx frames marked as "no free" are
  82. * done being transmitted
  83. */
  84. void(*txrx_data_tx_cb_set)(void *data_vdev,
  85. ol_txrx_data_tx_cb callback, void *ctxt);
  86. /*******************************************************************
  87. * Statistics and Debugging Interface (C Inteface)
  88. ********************************************************************/
  89. int(*txrx_aggr_cfg)(void *vdev, int max_subfrms_ampdu,
  90. int max_subfrms_amsdu);
  91. A_STATUS(*txrx_fw_stats_get)(void *vdev, struct ol_txrx_stats_req *req,
  92. bool per_vdev, bool response_expected);
  93. int(*txrx_debug)(void *vdev, int debug_specs);
  94. void(*txrx_fw_stats_cfg)(void *vdev,
  95. uint8_t cfg_stats_type, uint32_t cfg_val);
  96. void(*txrx_print_level_set)(unsigned level);
  97. /**
  98. * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
  99. * @vdev: vdev handle
  100. *
  101. * Return: vdev mac address
  102. */
  103. uint8_t *(*txrx_get_vdev_mac_addr)(void *vdev);
  104. /**
  105. * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
  106. * vdev
  107. * @vdev: vdev handle
  108. *
  109. * Return: Handle to struct qdf_mac_addr
  110. */
  111. struct qdf_mac_addr *
  112. (*txrx_get_vdev_struct_mac_addr)(void *vdev);
  113. /**
  114. * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
  115. * @vdev: vdev handle
  116. *
  117. * Return: Handle to pdev
  118. */
  119. void *(*txrx_get_pdev_from_vdev)
  120. (void *vdev);
  121. /**
  122. * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
  123. * @vdev: vdev handle
  124. *
  125. * Return: Handle to control pdev
  126. */
  127. void *
  128. (*txrx_get_ctrl_pdev_from_vdev)(void *vdev);
  129. void *
  130. (*txrx_get_vdev_from_vdev_id)(void *pdev, uint8_t vdev_id);
  131. void (*txrx_soc_detach)(void *soc);
  132. int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
  133. uint16_t tid, uint16_t batimeout, uint16_t buffersize,
  134. uint16_t startseqnum);
  135. void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
  136. uint8_t *dialogtoken, uint16_t *statuscode,
  137. uint16_t *buffersize, uint16_t *batimeout);
  138. int (*delba_process)(void *peer_handle,
  139. int tid, uint16_t reasoncode);
  140. };
  141. struct cdp_ctrl_ops {
  142. int
  143. (*txrx_mempools_attach)(void *ctrl_pdev);
  144. int
  145. (*txrx_set_filter_neighbour_peers)(
  146. void *pdev,
  147. u_int32_t val);
  148. /**
  149. * @brief set the safemode of the device
  150. * @details
  151. * This flag is used to bypass the encrypt and decrypt processes when
  152. * send and receive packets. It works like open AUTH mode, HW will
  153. * ctreate all packets as non-encrypt frames because no key installed.
  154. * For rx fragmented frames,it bypasses all the rx defragmentaion.
  155. *
  156. * @param vdev - the data virtual device object
  157. * @param val - the safemode state
  158. * @return - void
  159. */
  160. void
  161. (*txrx_set_safemode)(
  162. void *vdev,
  163. u_int32_t val);
  164. /**
  165. * @brief configure the drop unencrypted frame flag
  166. * @details
  167. * Rx related. When set this flag, all the unencrypted frames
  168. * received over a secure connection will be discarded
  169. *
  170. * @param vdev - the data virtual device object
  171. * @param val - flag
  172. * @return - void
  173. */
  174. void
  175. (*txrx_set_drop_unenc)(
  176. void *vdev,
  177. u_int32_t val);
  178. /**
  179. * @brief set the Tx encapsulation type of the VDEV
  180. * @details
  181. * This will be used to populate the HTT desc packet type field
  182. * during Tx
  183. * @param vdev - the data virtual device object
  184. * @param val - the Tx encap type
  185. * @return - void
  186. */
  187. void
  188. (*txrx_set_tx_encap_type)(
  189. void *vdev,
  190. enum htt_cmn_pkt_type val);
  191. /**
  192. * @brief set the Rx decapsulation type of the VDEV
  193. * @details
  194. * This will be used to configure into firmware and hardware
  195. * which format to decap all Rx packets into, for all peers under
  196. * the VDEV.
  197. * @param vdev - the data virtual device object
  198. * @param val - the Rx decap mode
  199. * @return - void
  200. */
  201. void
  202. (*txrx_set_vdev_rx_decap_type)(
  203. void *vdev,
  204. enum htt_cmn_pkt_type val);
  205. /**
  206. * @brief get the Rx decapsulation type of the VDEV
  207. *
  208. * @param vdev - the data virtual device object
  209. * @return - the Rx decap type
  210. */
  211. enum htt_pkt_type
  212. (*txrx_get_vdev_rx_decap_type)(void *vdev);
  213. /* Is this similar to ol_txrx_peer_state_update() in MCL */
  214. /**
  215. * @brief Update the authorize peer object at association time
  216. * @details
  217. * For the host-based implementation of rate-control, it
  218. * updates the peer/node-related parameters within rate-control
  219. * context of the peer at association.
  220. *
  221. * @param peer - pointer to the node's object
  222. * @authorize - either to authorize or unauthorize peer
  223. *
  224. * @return none
  225. */
  226. void
  227. (*txrx_peer_authorize)(void *peer,
  228. u_int32_t authorize);
  229. bool
  230. (*txrx_set_inact_params)(void *pdev,
  231. u_int16_t inact_check_interval,
  232. u_int16_t inact_normal,
  233. u_int16_t inact_overload);
  234. bool
  235. (*txrx_start_inact_timer)(
  236. void *pdev,
  237. bool enable);
  238. /**
  239. * @brief Set the overload status of the radio
  240. * @details
  241. * Set the overload status of the radio, updating the inactivity
  242. * threshold and inactivity count for each node.
  243. *
  244. * @param pdev - the data physical device object
  245. * @param overload - whether the radio is overloaded or not
  246. */
  247. void (*txrx_set_overload)(
  248. void *pdev,
  249. bool overload);
  250. /**
  251. * @brief Check the inactivity status of the peer/node
  252. *
  253. * @param peer - pointer to the node's object
  254. * @return true if the node is inactive; otherwise return false
  255. */
  256. bool
  257. (*txrx_peer_is_inact)(void *peer);
  258. /**
  259. * @brief Mark inactivity status of the peer/node
  260. * @details
  261. * If it becomes active, reset inactivity count to reload value;
  262. * if the inactivity status changed, notify umac band steering.
  263. *
  264. * @param peer - pointer to the node's object
  265. * @param inactive - whether the node is inactive or not
  266. */
  267. void (*txrx_mark_peer_inact)(
  268. void *peer,
  269. bool inactive);
  270. /* Should be ol_txrx_ctrl_api.h */
  271. void (*txrx_set_mesh_mode)(void *vdev, u_int32_t val);
  272. void (*tx_flush_buffers)(void *vdev);
  273. int (*txrx_is_target_ar900b)(void *vdev);
  274. };
  275. struct cdp_me_ops {
  276. u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
  277. (void *pdev, u_int16_t buf_count);
  278. u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
  279. void *pdev,
  280. u_int16_t buf_count);
  281. u_int16_t
  282. (*tx_get_mcast_buf_allocated_marked)
  283. (void *pdev);
  284. void
  285. (*tx_me_alloc_descriptor)(void *pdev);
  286. void
  287. (*tx_me_free_descriptor)(void *pdev);
  288. uint16_t
  289. (*tx_me_convert_ucast)(void *vdev,
  290. qdf_nbuf_t wbuf, u_int8_t newmac[][6],
  291. uint8_t newmaccnt);
  292. /* Should be a function pointer in ol_txrx_osif_ops{} */
  293. /**
  294. * @brief notify mcast frame indication from FW.
  295. * @details
  296. * This notification will be used to convert
  297. * multicast frame to unicast.
  298. *
  299. * @param pdev - handle to the ctrl SW's physical device object
  300. * @param vdev_id - ID of the virtual device received the special data
  301. * @param msdu - the multicast msdu returned by FW for host inspect
  302. */
  303. int (*mcast_notify)(void *pdev,
  304. u_int8_t vdev_id, qdf_nbuf_t msdu);
  305. };
  306. struct cdp_mon_ops {
  307. void (*txrx_monitor_set_filter_ucast_data)
  308. (void *, u_int8_t val);
  309. void (*txrx_monitor_set_filter_mcast_data)
  310. (void *, u_int8_t val);
  311. void (*txrx_monitor_set_filter_non_data)
  312. (void *, u_int8_t val);
  313. u_int8_t (*txrx_monitor_get_filter_ucast_data)
  314. (void *vdev_txrx_handle);
  315. u_int8_t (*txrx_monitor_get_filter_mcast_data)
  316. (void *vdev_txrx_handle);
  317. u_int8_t (*txrx_monitor_get_filter_non_data)
  318. (void *vdev_txrx_handle);
  319. int (*txrx_reset_monitor_mode)(void *pdev);
  320. };
  321. struct cdp_host_stats_ops {
  322. int (*txrx_host_stats_get)(void *vdev,
  323. struct ol_txrx_stats_req *req);
  324. void (*txrx_host_stats_clr)(void *vdev);
  325. void (*txrx_host_ce_stats)(void *vdev);
  326. int (*txrx_stats_publish)(void *pdev,
  327. void *buf);
  328. /**
  329. * @brief Enable enhanced stats functionality.
  330. *
  331. * @param pdev - the physical device object
  332. * @return - void
  333. */
  334. void (*txrx_enable_enhanced_stats)(void *pdev);
  335. /**
  336. * @brief Disable enhanced stats functionality.
  337. *
  338. * @param pdev - the physical device object
  339. * @return - void
  340. */
  341. void (*txrx_disable_enhanced_stats)(void *pdev);
  342. /**
  343. * @brief Get the desired stats from the message.
  344. *
  345. * @param pdev - the physical device object
  346. * @param stats_base - stats buffer recieved from FW
  347. * @param type - stats type.
  348. * @return - pointer to requested stat identified by type
  349. */
  350. uint32_t*(*txrx_get_stats_base)(void *pdev,
  351. uint32_t *stats_base, uint32_t msg_len, uint8_t type);
  352. void
  353. (*tx_print_tso_stats)(void *vdev);
  354. void
  355. (*tx_rst_tso_stats)(void *vdev);
  356. void
  357. (*tx_print_sg_stats)(void *vdev);
  358. void
  359. (*tx_rst_sg_stats)(void *vdev);
  360. void
  361. (*print_rx_cksum_stats)(void *vdev);
  362. void
  363. (*rst_rx_cksum_stats)(void *vdev);
  364. A_STATUS
  365. (*txrx_host_me_stats)(void *vdev);
  366. void
  367. (*txrx_per_peer_stats)(void *pdev, char *addr);
  368. int (*txrx_host_msdu_ttl_stats)(void *vdev,
  369. struct ol_txrx_stats_req *req);
  370. void
  371. (*print_lro_stats)(void *vdev);
  372. void
  373. (*reset_lro_stats)(void *vdev);
  374. };
  375. struct cdp_wds_ops {
  376. void
  377. (*txrx_set_wds_rx_policy)(void *vdev,
  378. u_int32_t val);
  379. };
  380. struct cdp_raw_ops {
  381. int (*txrx_get_nwifi_mode)(void *vdev);
  382. int
  383. (*rsim_tx_encap)(void *vdev, qdf_nbuf_t *pnbuf);
  384. };
  385. #ifdef CONFIG_WIN
  386. struct cdp_pflow_ops {
  387. uint32_t(*pflow_update_pdev_params)(void *,
  388. ol_ath_param_t, uint32_t, void *);
  389. };
  390. #endif /* CONFIG_WIN */
  391. struct ol_if_ops {
  392. void (*peer_set_default_routing)(void *scn_handle,
  393. uint8_t *peer_macaddr, uint8_t vdev_id,
  394. bool hash_based, uint8_t ring_num);
  395. int (*peer_rx_reorder_queue_setup)(void *ol_soc_handle,
  396. uint8_t vdev_id, uint8_t *peer_mac,
  397. qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num);
  398. int (*peer_rx_reorder_queue_remove)(void *ol_soc_handle,
  399. uint8_t vdev_id, uint8_t *peer_macaddr,
  400. uint32_t tid_mask);
  401. int (*peer_unref_delete)(void *ol_soc_handle);
  402. /* TODO: Add any other control path calls required to OL_IF/WMA layer */
  403. };
  404. #ifndef CONFIG_WIN
  405. /* From here MCL specific OPs */
  406. /**
  407. * struct cdp_misc_ops - mcl ops not classified
  408. * @set_ibss_vdev_heart_beat_timer:
  409. * @bad_peer_txctl_set_setting:
  410. * @bad_peer_txctl_update_threshold:
  411. * @hl_tdls_flag_reset:
  412. * @tx_non_std:
  413. * @get_vdev_id:
  414. * @set_wisa_mode:
  415. * @runtime_suspend:
  416. * @runtime_resume:
  417. */
  418. struct cdp_misc_ops {
  419. uint16_t (*set_ibss_vdev_heart_beat_timer)(void *vdev,
  420. uint16_t timer_value_sec);
  421. void (*set_wmm_param)(void *cfg_pdev,
  422. struct ol_tx_wmm_param_t wmm_param);
  423. void (*bad_peer_txctl_set_setting)(void *pdev, int enable,
  424. int period, int txq_limit);
  425. void (*bad_peer_txctl_update_threshold)(void *pdev,
  426. int level, int tput_thresh, int tx_limit);
  427. void (*hl_tdls_flag_reset)(void *vdev, bool flag);
  428. qdf_nbuf_t (*tx_non_std)(void *vdev,
  429. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
  430. uint16_t (*get_vdev_id)(void *vdev);
  431. QDF_STATUS (*set_wisa_mode)(void *vdev, bool enable);
  432. QDF_STATUS (*runtime_suspend)(void *pdev);
  433. QDF_STATUS (*runtime_resume)(void *pdev);
  434. int (*get_opmode)(void *vdev);
  435. void (*mark_first_wakeup_packet)(uint8_t value);
  436. void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
  437. void (*flush_rx_frames)(void *peer, bool drop);
  438. A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
  439. uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
  440. void (*pkt_log_init)(void *handle, void *scn);
  441. void (*pkt_log_con_service)(void *pdev, void *scn);
  442. };
  443. /**
  444. * struct cdp_tx_delay_ops - mcl tx delay ops
  445. * @tx_delay:
  446. * @tx_delay_hist:
  447. * @tx_packet_count:
  448. * @tx_set_compute_interval:
  449. */
  450. struct cdp_tx_delay_ops {
  451. void (*tx_delay)(void *pdev, uint32_t *queue_delay_microsec,
  452. uint32_t *tx_delay_microsec, int category);
  453. void (*tx_delay_hist)(void *pdev,
  454. uint16_t *bin_values, int category);
  455. void (*tx_packet_count)(void *pdev, uint16_t *out_packet_count,
  456. uint16_t *out_packet_loss_count, int category);
  457. void (*tx_set_compute_interval)(void *pdev, uint32_t interval);
  458. };
  459. /**
  460. * struct cdp_pmf_ops - mcl protected management frame ops
  461. * @get_pn_info:
  462. */
  463. struct cdp_pmf_ops {
  464. void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
  465. uint64_t **last_pn, uint32_t **rmf_pn_replays);
  466. };
  467. /**
  468. * struct cdp_cfg_ops - mcl configuration ops
  469. * @set_cfg_rx_fwd_disabled:
  470. * @set_cfg_packet_log_enabled:
  471. * @cfg_attach:
  472. * @vdev_rx_set_intrabss_fwd:
  473. * @get_opmode:
  474. * @is_rx_fwd_disabled:
  475. * @tx_set_is_mgmt_over_wmi_enabled:
  476. * @is_high_latency:
  477. * @set_flow_control_parameters:
  478. */
  479. struct cdp_cfg_ops {
  480. void (*set_cfg_rx_fwd_disabled)(void *pdev, uint8_t disable_rx_fwd);
  481. void (*set_cfg_packet_log_enabled)(void *pdev, uint8_t val);
  482. void * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
  483. void (*vdev_rx_set_intrabss_fwd)(void *vdev, bool val);
  484. uint8_t (*is_rx_fwd_disabled)(void *vdev);
  485. void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
  486. int (*is_high_latency)(void *pdev);
  487. void (*set_flow_control_parameters)(void *cfg, void *param);
  488. void (*set_flow_steering)(void *pdev, uint8_t val);
  489. };
  490. /**
  491. * struct cdp_flowctl_ops - mcl flow control
  492. * @register_pause_cb:
  493. * @set_desc_global_pool_size:
  494. * @dump_flow_pool_info:
  495. */
  496. struct cdp_flowctl_ops {
  497. QDF_STATUS (*register_pause_cb)(ol_tx_pause_callback_fp);
  498. void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
  499. void (*dump_flow_pool_info)(void);
  500. };
  501. /**
  502. * struct cdp_lflowctl_ops - mcl legacy flow control ops
  503. * @register_tx_flow_control:
  504. * @deregister_tx_flow_control_cb:
  505. * @flow_control_cb:
  506. * @get_tx_resource:
  507. * @ll_set_tx_pause_q_depth:
  508. * @vdev_flush:
  509. * @vdev_pause:
  510. * @vdev_unpause:
  511. */
  512. struct cdp_lflowctl_ops {
  513. int (*register_tx_flow_control)(uint8_t vdev_id,
  514. ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx);
  515. int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
  516. void (*flow_control_cb)(void *vdev, bool tx_resume);
  517. bool (*get_tx_resource)(uint8_t sta_id,
  518. unsigned int low_watermark,
  519. unsigned int high_watermark_offset);
  520. int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
  521. void (*vdev_flush)(void *vdev);
  522. void (*vdev_pause)(void *vdev, uint32_t reason);
  523. void (*vdev_unpause)(void *vdev, uint32_t reason);
  524. };
  525. /**
  526. * struct cdp_ipa_ops - mcl ipa data path ops
  527. * @ipa_get_resource:
  528. * @ipa_set_doorbell_paddr:
  529. * @ipa_set_active:
  530. * @ipa_op_response:
  531. * @ipa_register_op_cb:
  532. * @ipa_get_stat:
  533. * @ipa_tx_data_frame:
  534. */
  535. struct cdp_ipa_ops {
  536. void (*ipa_get_resource)(void *pdev,
  537. struct ol_txrx_ipa_resources *ipa_res);
  538. void (*ipa_set_doorbell_paddr)(void *pdev,
  539. qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
  540. qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
  541. void (*ipa_set_active)(void *pdev, bool uc_active, bool is_tx);
  542. void (*ipa_op_response)(void *pdev, uint8_t *op_msg);
  543. void (*ipa_register_op_cb)(void *pdev,
  544. void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
  545. void *osif_dev);
  546. void (*ipa_get_stat)(void *pdev);
  547. qdf_nbuf_t (*ipa_tx_data_frame)(void *vdev, qdf_nbuf_t skb);
  548. void (*ipa_set_uc_tx_partition_base)(void *pdev, uint32_t value);
  549. };
  550. /**
  551. * struct cdp_lro_ops - mcl large receive offload ops
  552. * @register_lro_flush_cb:
  553. * @deregister_lro_flush_cb:
  554. */
  555. struct cdp_lro_ops {
  556. void (*register_lro_flush_cb)(void (lro_flush_cb)(void *),
  557. void *(lro_init_cb)(void));
  558. void (*deregister_lro_flush_cb)(void (lro_deinit_cb)(void *));
  559. };
  560. /**
  561. * struct cdp_bus_ops - mcl bus suspend/resume ops
  562. * @bus_suspend:
  563. * @bus_resume:
  564. */
  565. struct cdp_bus_ops {
  566. QDF_STATUS (*bus_suspend)(void);
  567. QDF_STATUS (*bus_resume)(void);
  568. };
  569. /**
  570. * struct cdp_ocb_ops - mcl ocb ops
  571. * @set_ocb_chan_info:
  572. * @get_ocb_chan_info:
  573. */
  574. struct cdp_ocb_ops {
  575. void (*set_ocb_chan_info)(void *vdev,
  576. struct ol_txrx_ocb_set_chan ocb_set_chan);
  577. struct ol_txrx_ocb_chan_info * (*get_ocb_chan_info)(void *vdev);
  578. };
  579. /**
  580. * struct cdp_peer_ops - mcl peer related ops
  581. * @register_peer:
  582. * @clear_peer:
  583. * @cfg_attach:
  584. * @find_peer_by_addr:
  585. * @find_peer_by_addr_and_vdev:
  586. * @local_peer_id:
  587. * @peer_find_by_local_id:
  588. * @peer_state_update:
  589. * @get_vdevid:
  590. * @get_vdev_by_sta_id:
  591. * @register_ocb_peer:
  592. * @peer_get_peer_mac_addr:
  593. * @get_peer_state:
  594. * @get_vdev_for_peer:
  595. * @update_ibss_add_peer_num_of_vdev:
  596. * @remove_peers_for_vdev:
  597. * @remove_peers_for_vdev_no_lock:
  598. * @copy_mac_addr_raw:
  599. * @add_last_real_peer:
  600. * @last_assoc_received:
  601. * @last_disassoc_received:
  602. * @last_deauth_received:
  603. * @is_vdev_restore_last_peer:
  604. * @update_last_real_peer:
  605. */
  606. struct cdp_peer_ops {
  607. QDF_STATUS (*register_peer)(void *pdev,
  608. struct ol_txrx_desc_type *sta_desc);
  609. QDF_STATUS (*clear_peer)(void *pdev, uint8_t sta_id);
  610. QDF_STATUS (*change_peer_state)(uint8_t sta_id,
  611. enum ol_txrx_peer_state sta_state,
  612. bool roam_synch_in_progress);
  613. void * (*find_peer_by_addr)(void *pdev,
  614. uint8_t *peer_addr, uint8_t *peer_id);
  615. void * (*find_peer_by_addr_and_vdev)(void *pdev, void *vdev,
  616. uint8_t *peer_addr, uint8_t *peer_id);
  617. uint16_t (*local_peer_id)(void *peer);
  618. void * (*peer_find_by_local_id)(void *pdev, uint8_t local_peer_id);
  619. QDF_STATUS (*peer_state_update)(void *pdev, uint8_t *peer_addr,
  620. enum ol_txrx_peer_state state);
  621. QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
  622. void * (*get_vdev_by_sta_id)(uint8_t sta_id);
  623. QDF_STATUS (*register_ocb_peer)(void *cds_ctx, uint8_t *mac_addr,
  624. uint8_t *peer_id);
  625. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  626. int (*get_peer_state)(void *peer);
  627. void * (*get_vdev_for_peer)(void *peer);
  628. int16_t (*update_ibss_add_peer_num_of_vdev)(void *vdev,
  629. int16_t peer_num_delta);
  630. void (*remove_peers_for_vdev)(void *vdev,
  631. ol_txrx_vdev_peer_remove_cb callback,
  632. void *callback_context, bool remove_last_peer);
  633. void (*remove_peers_for_vdev_no_lock)(void *vdev,
  634. ol_txrx_vdev_peer_remove_cb callback,
  635. void *callback_context);
  636. void (*copy_mac_addr_raw)(void *vdev, uint8_t *bss_addr);
  637. void (*add_last_real_peer)(void *pdev, void *vdev, uint8_t *peer_id);
  638. qdf_time_t * (*last_assoc_received)(void *peer);
  639. qdf_time_t * (*last_disassoc_received)(void *peer);
  640. qdf_time_t * (*last_deauth_received)(void *peer);
  641. bool (*is_vdev_restore_last_peer)(void *peer);
  642. void (*update_last_real_peer)(void *pdev, void *peer,
  643. uint8_t *peer_id, bool restore_last_peer);
  644. void (*peer_detach_force_delete)(void *peer);
  645. };
  646. /**
  647. * struct cdp_ocb_ops - mcl ocb ops
  648. * @throttle_init_period:
  649. * @throttle_set_level:
  650. */
  651. struct cdp_throttle_ops {
  652. void (*throttle_init_period)(void *pdev, int period,
  653. uint8_t *dutycycle_level);
  654. void (*throttle_set_level)(void *pdev, int level);
  655. };
  656. /**
  657. * struct cdp_ocb_ops - mcl ocb ops
  658. * @display_stats:
  659. * @clear_stats:
  660. * @stats:
  661. */
  662. struct cdp_mob_stats_ops {
  663. void (*display_stats)(uint16_t bitmap);
  664. void (*clear_stats)(uint16_t bitmap);
  665. int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
  666. };
  667. #endif /* CONFIG_WIN */
  668. struct cdp_ops {
  669. struct cdp_cmn_ops *cmn_drv_ops;
  670. struct cdp_ctrl_ops *ctrl_ops;
  671. struct cdp_me_ops *me_ops;
  672. struct cdp_mon_ops *mon_ops;
  673. struct cdp_host_stats_ops *host_stats_ops;
  674. struct cdp_wds_ops *wds_ops;
  675. struct cdp_raw_ops *raw_ops;
  676. struct cdp_pflow_ops *pflow_ops;
  677. #ifndef CONFIG_WIN
  678. struct cdp_misc_ops *misc_ops;
  679. struct cdp_cfg_ops *cfg_ops;
  680. struct cdp_flowctl_ops *flowctl_ops;
  681. struct cdp_lflowctl_ops *l_flowctl_ops;
  682. struct cdp_ipa_ops *ipa_ops;
  683. struct cdp_lro_ops *lro_ops;
  684. struct cdp_bus_ops *bus_ops;
  685. struct cdp_ocb_ops *ocb_ops;
  686. struct cdp_peer_ops *peer_ops;
  687. struct cdp_throttle_ops *throttle_ops;
  688. struct cdp_mob_stats_ops *mob_stats_ops;
  689. struct cdp_tx_delay_ops *delay_ops;
  690. struct cdp_pmf_ops *pmf_ops;
  691. #endif /* CONFIG_WIN */
  692. };
  693. #endif