/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */

#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#ifdef CONFIG_WIN
#include <cdp_txrx_stats_struct.h>
#endif
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#ifdef IPA_OFFLOAD
#include <qdf_ipa.h>
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL              0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER       1
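
/*
 * Usage sketch (illustrative only, not part of this API): the bitmap above
 * is consumed by the txrx_peer_delete() op declared below. Assuming the
 * usual converged-datapath layout where the soc handle carries a cdp_ops
 * pointer, a caller that wants the unmap timer skipped might do:
 *
 *      uint32_t bitmap = CDP_PEER_DELETE_NO_SPECIAL;
 *
 *      if (skip_unmap_timer)
 *              bitmap |= CDP_PEER_DO_NOT_START_UNMAP_TIMER;
 *      soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap);
 *
 * "soc", "peer" and "skip_unmap_timer" are hypothetical caller-owned names.
 */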

/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
    int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
    int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
    struct cdp_vdev *(*txrx_vdev_attach)
        (struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
         uint8_t vdev_id, enum wlan_op_mode op_mode);
    void (*txrx_vdev_detach)
        (struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
         void *cb_context);
    struct cdp_pdev *(*txrx_pdev_attach)
        (ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
         HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
    int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
    void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
    void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
    void *(*txrx_peer_create)
        (struct cdp_vdev *vdev, uint8_t *peer_mac_addr);
    void (*txrx_peer_setup)
        (struct cdp_vdev *vdev_hdl, void *peer_hdl);
    void (*txrx_peer_teardown)
        (struct cdp_vdev *vdev_hdl, void *peer_hdl);
    void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
    int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
            uint8_t smart_monitor);
    void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
    void (*txrx_set_privacy_filters)
        (struct cdp_vdev *vdev, void *filter, uint32_t num);

    /********************************************************************
     * Data Interface (B Interface)
     ********************************************************************/
    void (*txrx_vdev_register)(struct cdp_vdev *vdev,
            void *osif_vdev, struct ol_txrx_ops *txrx_ops);
    int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
            qdf_nbuf_t tx_mgmt_frm, uint8_t type);
    int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
            qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
            uint16_t chanfreq);

    /**
     * ol_txrx_mgmt_tx_cb - tx management delivery notification
     * callback function
     */
    void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
            ol_txrx_mgmt_tx_cb download_cb,
            ol_txrx_mgmt_tx_cb ota_ack_cb,
            void *ctxt);
    int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);

    /**
     * ol_txrx_data_tx_cb - Function registered with the data path
     * that is called when tx frames marked as "no free" are
     * done being transmitted
     */
    void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
            ol_txrx_data_tx_cb callback, void *ctxt);

    /*******************************************************************
     * Statistics and Debugging Interface (C Interface)
     ********************************************************************/
    int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
            int max_subfrms_amsdu);
    A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
            struct ol_txrx_stats_req *req,
            bool per_vdev, bool response_expected);
    int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
    void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
            uint8_t cfg_stats_type, uint32_t cfg_val);
    void (*txrx_print_level_set)(unsigned level);

    /**
     * txrx_get_vdev_mac_addr() - Return mac addr of vdev
     * @vdev: vdev handle
     *
     * Return: vdev mac address
     */
    uint8_t *(*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);

    /**
     * txrx_get_vdev_struct_mac_addr() - Return handle to struct
     * qdf_mac_addr of vdev
     * @vdev: vdev handle
     *
     * Return: Handle to struct qdf_mac_addr
     */
    struct qdf_mac_addr *
        (*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);

    /**
     * txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
     * @vdev: vdev handle
     *
     * Return: Handle to pdev
     */
    struct cdp_pdev *(*txrx_get_pdev_from_vdev)
        (struct cdp_vdev *vdev);

    /**
     * txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
     * @vdev: vdev handle
     *
     * Return: Handle to control pdev
     */
    struct cdp_cfg *
        (*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);

    struct cdp_vdev *
        (*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
            uint8_t vdev_id);
    void (*txrx_soc_detach)(void *soc);
    int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
            uint16_t tid, uint16_t batimeout, uint16_t buffersize,
            uint16_t startseqnum);
    void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
            uint8_t *dialogtoken, uint16_t *statuscode,
            uint16_t *buffersize, uint16_t *batimeout);
    int (*delba_process)(void *peer_handle,
            int tid, uint16_t reasoncode);
    void (*set_addba_response)(void *peer_handle,
            uint8_t tid, uint16_t statuscode);
    uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
            uint16_t peer_id, uint8_t *mac_addr);
    void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
            uint8_t map_id);
    void (*flush_cache_rx_queue)(void);
    void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
            uint8_t tos, uint8_t tid);
    int (*txrx_stats)(struct cdp_vdev *vdev, enum cdp_stats stats);
    int (*txrx_stats_request)(struct cdp_vdev *vdev,
            struct cdp_txrx_stats_req *req);
    QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
            enum qdf_stats_verbosity_level level);
    void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
    int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
    QDF_STATUS (*txrx_intr_attach)(void *soc);
    void (*txrx_intr_detach)(void *soc);
    void (*set_pn_check)(struct cdp_vdev *vdev,
            struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
            uint32_t *rx_pn);
    QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
            struct cdp_config_params *params);
    void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
    void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
            void *dp_txrx_hdl);
};
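
/*
 * Illustrative bring-up sequence (a sketch, not a normative flow): a control
 * path that holds a filled-in cdp_cmn_ops table typically attaches objects
 * top-down and registers OS callbacks before creating peers. Every handle
 * below ("ops", "soc", "ctrl_pdev", "htc", "osdev", "osif_vdev", the MAC
 * addresses and "txrx_ops") is a hypothetical caller-side name.
 *
 *      struct cdp_pdev *pdev;
 *      struct cdp_vdev *vdev;
 *      void *peer;
 *
 *      pdev = ops->txrx_pdev_attach(soc, ctrl_pdev, htc, osdev, 0);
 *      ops->txrx_pdev_post_attach(pdev);
 *      vdev = ops->txrx_vdev_attach(pdev, vdev_mac, vdev_id, op_mode);
 *      ops->txrx_vdev_register(vdev, osif_vdev, &txrx_ops);
 *      peer = ops->txrx_peer_create(vdev, peer_mac);
 *      ops->txrx_peer_setup(vdev, peer);
 *
 * Teardown is expected to run in the reverse order: txrx_peer_delete(),
 * txrx_vdev_detach(), then txrx_pdev_pre_detach()/txrx_pdev_detach().
 */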

struct cdp_ctrl_ops {
    int
        (*txrx_mempools_attach)(void *ctrl_pdev);
    int
        (*txrx_set_filter_neighbour_peers)(
            struct cdp_pdev *pdev,
            uint32_t val);
    int
        (*txrx_update_filter_neighbour_peers)(
            struct cdp_pdev *pdev,
            uint32_t cmd, uint8_t *macaddr);

    /**
     * @brief set the safemode of the device
     * @details
     * This flag is used to bypass the encrypt and decrypt processes when
     * sending and receiving packets. It works like open AUTH mode: HW
     * treats all packets as non-encrypted frames because no key is
     * installed. For rx fragmented frames, it bypasses all the rx
     * defragmentation.
     *
     * @param vdev - the data virtual device object
     * @param val - the safemode state
     * @return - void
     */
    void
        (*txrx_set_safemode)(
            struct cdp_vdev *vdev,
            u_int32_t val);

    /**
     * @brief configure the drop unencrypted frame flag
     * @details
     * Rx related. When this flag is set, all unencrypted frames
     * received over a secure connection will be discarded.
     *
     * @param vdev - the data virtual device object
     * @param val - flag
     * @return - void
     */
    void
        (*txrx_set_drop_unenc)(
            struct cdp_vdev *vdev,
            u_int32_t val);

    /**
     * @brief set the Tx encapsulation type of the VDEV
     * @details
     * This will be used to populate the HTT desc packet type field
     * during Tx.
     *
     * @param vdev - the data virtual device object
     * @param val - the Tx encap type
     * @return - void
     */
    void
        (*txrx_set_tx_encap_type)(
            struct cdp_vdev *vdev,
            enum htt_cmn_pkt_type val);

    /**
     * @brief set the Rx decapsulation type of the VDEV
     * @details
     * This will be used to configure into firmware and hardware
     * which format to decap all Rx packets into, for all peers under
     * the VDEV.
     *
     * @param vdev - the data virtual device object
     * @param val - the Rx decap mode
     * @return - void
     */
    void
        (*txrx_set_vdev_rx_decap_type)(
            struct cdp_vdev *vdev,
            enum htt_cmn_pkt_type val);

    /**
     * @brief get the Rx decapsulation type of the VDEV
     *
     * @param vdev - the data virtual device object
     * @return - the Rx decap type
     */
    enum htt_cmn_pkt_type
        (*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);

    /* Is this similar to ol_txrx_peer_state_update() in MCL? */
    /**
     * @brief Update the authorize peer object at association time
     * @details
     * For the host-based implementation of rate-control, it
     * updates the peer/node-related parameters within rate-control
     * context of the peer at association.
     *
     * @param peer - pointer to the node's object
     * @param authorize - either to authorize or unauthorize peer
     *
     * @return none
     */
    void
        (*txrx_peer_authorize)(struct cdp_peer *peer,
            u_int32_t authorize);
    bool
        (*txrx_set_inact_params)(struct cdp_pdev *pdev,
            u_int16_t inact_check_interval,
            u_int16_t inact_normal,
            u_int16_t inact_overload);
    bool
        (*txrx_start_inact_timer)(
            struct cdp_pdev *pdev,
            bool enable);

    /**
     * @brief Set the overload status of the radio
     * @details
     * Set the overload status of the radio, updating the inactivity
     * threshold and inactivity count for each node.
     *
     * @param pdev - the data physical device object
     * @param overload - whether the radio is overloaded or not
     */
    void (*txrx_set_overload)(
        struct cdp_pdev *pdev,
        bool overload);

    /**
     * @brief Check the inactivity status of the peer/node
     *
     * @param peer - pointer to the node's object
     * @return true if the node is inactive; otherwise return false
     */
    bool
        (*txrx_peer_is_inact)(void *peer);

    /**
     * @brief Mark the inactivity status of the peer/node
     * @details
     * If it becomes active, reset the inactivity count to the reload
     * value; if the inactivity status changed, notify umac band steering.
     *
     * @param peer - pointer to the node's object
     * @param inactive - whether the node is inactive or not
     */
    void (*txrx_mark_peer_inact)(
        void *peer,
        bool inactive);

    /* Should be in ol_txrx_ctrl_api.h */
    void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);

    /**
     * @brief set the mesh rx filter
     * @details
     * Packets are dropped based on the bits enabled in the filter.
     *
     * @param vdev - the data virtual device object
     * @param val - value to set
     */
    void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);

    void (*tx_flush_buffers)(struct cdp_vdev *vdev);
    int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
    void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
            enum cdp_vdev_param_type param, uint32_t val);
    void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);

    /**
     * @brief Set the reo dest ring num of the radio
     * @details
     * Set the reo destination ring number on which we will receive
     * pkts for this radio.
     *
     * @param pdev - the data physical device object
     * @param reo_dest_ring_num - value in the range 1 - 4
     */
    void (*txrx_set_pdev_reo_dest)(
        struct cdp_pdev *pdev,
        enum cdp_host_reo_dest_ring reo_dest_ring_num);

    /**
     * @brief Get the reo dest ring num of the radio
     * @details
     * Get the reo destination ring number on which we will receive
     * pkts for this radio.
     *
     * @param pdev - the data physical device object
     * @return the reo destination ring number
     */
    enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
        struct cdp_pdev *pdev);

    int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
            uint32_t event);
    int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
            uint32_t event);
    int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
    void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
            uint8_t subtype, uint8_t tx_power);
    void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
            enum cdp_pdev_param_type type, uint8_t val);
    int (*txrx_wdi_event_handler)(struct cdp_pdev *pdev,
            uint32_t event, void *evt_data);
    void *(*txrx_get_pldev)(struct cdp_pdev *pdev);
};
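
/*
 * Illustrative use (a sketch only): per-radio and per-vdev tunables are
 * pushed through this table, e.g. selecting the REO destination ring of a
 * radio or toggling a vdev parameter. "ctrl_ops", "pdev", "vdev",
 * "reo_ring", "param" and "val" are hypothetical caller-side names; the
 * last three would be values of enum cdp_host_reo_dest_ring,
 * enum cdp_vdev_param_type and the corresponding parameter value.
 *
 *      ctrl_ops->txrx_set_pdev_reo_dest(pdev, reo_ring);
 *      ctrl_ops->txrx_set_vdev_param(vdev, param, val);
 *      ctrl_ops->txrx_set_mesh_mode(vdev, 1);
 */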

struct cdp_me_ops {
    u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
        (struct cdp_pdev *pdev, u_int16_t buf_count);
    u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
            struct cdp_pdev *pdev,
            u_int16_t buf_count);
    u_int16_t
        (*tx_get_mcast_buf_allocated_marked)
            (struct cdp_pdev *pdev);
    void
        (*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
    void
        (*tx_me_free_descriptor)(struct cdp_pdev *pdev);
    uint16_t
        (*tx_me_convert_ucast)(struct cdp_vdev *vdev,
            qdf_nbuf_t wbuf, u_int8_t newmac[][6],
            uint8_t newmaccnt);

    /* Should be a function pointer in ol_txrx_osif_ops{} */
    /**
     * @brief notify a mcast frame indication from FW
     * @details
     * This notification will be used to convert
     * multicast frames to unicast.
     *
     * @param pdev - handle to the ctrl SW's physical device object
     * @param vdev_id - ID of the virtual device that received the special data
     * @param msdu - the multicast msdu returned by FW for host inspection
     */
    int (*mcast_notify)(struct cdp_pdev *pdev,
            u_int8_t vdev_id, qdf_nbuf_t msdu);
};

struct cdp_mon_ops {
    void (*txrx_monitor_set_filter_ucast_data)
        (struct cdp_pdev *, u_int8_t val);
    void (*txrx_monitor_set_filter_mcast_data)
        (struct cdp_pdev *, u_int8_t val);
    void (*txrx_monitor_set_filter_non_data)
        (struct cdp_pdev *, u_int8_t val);

    u_int8_t (*txrx_monitor_get_filter_ucast_data)
        (struct cdp_vdev *vdev_txrx_handle);
    u_int8_t (*txrx_monitor_get_filter_mcast_data)
        (struct cdp_vdev *vdev_txrx_handle);
    u_int8_t (*txrx_monitor_get_filter_non_data)
        (struct cdp_vdev *vdev_txrx_handle);

    int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);

    /* HK advance monitor filter support */
    int (*txrx_set_advance_monitor_filter)
        (struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};

struct cdp_host_stats_ops {
    int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
            struct ol_txrx_stats_req *req);
    void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
    void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
    int (*txrx_stats_publish)(struct cdp_pdev *pdev,
            void *buf);

    /**
     * @brief Enable enhanced stats functionality.
     *
     * @param pdev - the physical device object
     * @return - void
     */
    void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);

    /**
     * @brief Disable enhanced stats functionality.
     *
     * @param pdev - the physical device object
     * @return - void
     */
    void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);

    /**
     * @brief Get the desired stats from the message.
     *
     * @param pdev - the physical device object
     * @param stats_base - stats buffer received from FW
     * @param msg_len - length of the stats message
     * @param type - stats type
     * @return - pointer to requested stat identified by type
     */
    uint32_t *(*txrx_get_stats_base)(struct cdp_pdev *pdev,
            uint32_t *stats_base, uint32_t msg_len, uint8_t type);

    void
        (*tx_print_tso_stats)(struct cdp_vdev *vdev);
    void
        (*tx_rst_tso_stats)(struct cdp_vdev *vdev);
    void
        (*tx_print_sg_stats)(struct cdp_vdev *vdev);
    void
        (*tx_rst_sg_stats)(struct cdp_vdev *vdev);
    void
        (*print_rx_cksum_stats)(struct cdp_vdev *vdev);
    void
        (*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
    A_STATUS
        (*txrx_host_me_stats)(struct cdp_vdev *vdev);
    void
        (*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
    int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
            struct ol_txrx_stats_req *req);
    void
        (*print_lro_stats)(struct cdp_vdev *vdev);
    void
        (*reset_lro_stats)(struct cdp_vdev *vdev);
    void
        (*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
            uint32_t cap);
    void
        (*get_htt_stats)(struct cdp_pdev *pdev, void *data,
            uint32_t data_len);
};

struct cdp_wds_ops {
    void
        (*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
            u_int32_t val);
    void
        (*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
            int wds_tx_ucast, int wds_tx_mcast);
    int (*vdev_set_wds)(void *vdev, uint32_t val);
};

struct cdp_raw_ops {
    int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
    void (*rsim_get_astentry)(struct cdp_vdev *vdev,
            qdf_nbuf_t *pnbuf,
            struct cdp_raw_ast *raw_ast);
};

#ifdef CONFIG_WIN
struct cdp_pflow_ops {
    uint32_t (*pflow_update_pdev_params)(void *,
            enum _ol_ath_param_t, uint32_t, void *);
};
#endif /* CONFIG_WIN */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set LRO init parameters
 * @lro_enable: indicates whether LRO is enabled
 * @tcp_flag: if the TCP flags from the packet do not match
 * the values in this field after masking with the TCP flags mask
 * below, the packet is not LRO eligible
 * @tcp_flag_mask: field for comparing the TCP flag values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains the seed needed to compute the flow id
 * 5-tuple Toeplitz hash for IPv4 packets
 * @toeplitz_hash_ipv6: contains the seed needed to compute the flow id
 * 5-tuple Toeplitz hash for IPv6 packets
 */
struct cdp_lro_hash_config {
    uint32_t lro_enable;
    uint32_t tcp_flag:9,
             tcp_flag_mask:9;
    uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
    uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
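
/*
 * Minimal fill-in sketch (illustrative; the flag values and the random-seed
 * helper shown are assumptions, not requirements of this header): the host
 * populates the structure and hands it to the target through the
 * ol_if_ops->lro_hash_config() callback declared below.
 *
 *      struct cdp_lro_hash_config cfg = { 0 };
 *
 *      cfg.lro_enable = 1;
 *      cfg.tcp_flag = 0x010;           (example: match pure-ACK segments)
 *      cfg.tcp_flag_mask = 0x1ff;      (compare all nine TCP flag bits)
 *      qdf_get_random_bytes(cfg.toeplitz_hash_ipv4,
 *                           sizeof(cfg.toeplitz_hash_ipv4));
 *      qdf_get_random_bytes(cfg.toeplitz_hash_ipv6,
 *                           sizeof(cfg.toeplitz_hash_ipv6));
 *      ol_ops->lro_hash_config(scn_handle, &cfg);
 *
 * "ol_ops" and "scn_handle" are hypothetical caller-owned handles.
 */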

struct ol_if_ops {
    void (*peer_set_default_routing)(void *scn_handle,
            uint8_t *peer_macaddr, uint8_t vdev_id,
            bool hash_based, uint8_t ring_num);
    int (*peer_rx_reorder_queue_setup)(void *scn_handle,
            uint8_t vdev_id, uint8_t *peer_mac,
            qdf_dma_addr_t hw_qdesc, int tid, uint16_t queue_num);
    int (*peer_rx_reorder_queue_remove)(void *scn_handle,
            uint8_t vdev_id, uint8_t *peer_macaddr,
            uint32_t tid_mask);
    int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
            uint8_t *peer_macaddr);
    bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
    int (*peer_add_wds_entry)(void *ol_osif_vdev_handle,
            const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
            uint32_t flags);
    int (*peer_update_wds_entry)(void *ol_osif_vdev_handle,
            uint8_t *dest_macaddr, uint8_t *peer_macaddr,
            uint32_t flags);
    void (*peer_del_wds_entry)(void *ol_osif_vdev_handle,
            uint8_t *wds_macaddr);
    QDF_STATUS (*lro_hash_config)(void *scn_handle,
            struct cdp_lro_hash_config *lro_hash);
    void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
            uint8_t type);
    uint8_t (*rx_invalid_peer)(void *osif_pdev, void *msg);
    int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id,
            uint16_t hw_peer_id, uint8_t vdev_id, uint8_t *peer_mac_addr,
            enum cdp_txrx_ast_entry_type peer_type);
    int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);
    int (*get_dp_cfg_param)(void *ol_soc_handle,
            enum cdp_cfg_param_type param_num);
    void (*rx_mic_error)(void *ol_soc_handle,
            uint16_t vdev_id, void *wh);
    uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);
    void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
            u_int8_t *dstmac, bool active);
    /* TODO: Add any other control path calls required to OL_IF/WMA layer */
};

#ifndef CONFIG_WIN
/* From here on, MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @set_wmm_param:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 * @get_opmode:
 * @mark_first_wakeup_packet:
 * @update_mac_id:
 * @flush_rx_frames:
 * @get_intra_bss_fwd_pkts_count:
 * @pkt_log_init:
 * @pkt_log_con_service:
 */
struct cdp_misc_ops {
    uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
            uint16_t timer_value_sec);
    void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
            struct ol_tx_wmm_param_t wmm_param);
    void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
            int period, int txq_limit);
    void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
            int level, int tput_thresh, int tx_limit);
    void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
    qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
            enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
    uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
    QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
    QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
    QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
    void (*txrx_post_data_stall_event)(
            enum data_stall_log_event_indicator indicator,
            enum data_stall_log_event_type data_stall_type,
            uint32_t pdev_id, uint32_t vdev_id_bitmap,
            enum data_stall_log_recovery_type recovery_type);
    QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
    QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
    int (*get_opmode)(struct cdp_vdev *vdev);
    void (*mark_first_wakeup_packet)(uint8_t value);
    void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
    void (*flush_rx_frames)(void *peer, bool drop);
    A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
            uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
    void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
    void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};

/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
    void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
            uint32_t *tx_delay_microsec, int category);
    void (*tx_delay_hist)(struct cdp_pdev *pdev,
            uint16_t *bin_values, int category);
    void (*tx_packet_count)(struct cdp_pdev *pdev,
            uint16_t *out_packet_count,
            uint16_t *out_packet_loss_count, int category);
    void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
            uint32_t interval);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
    void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
            uint64_t **last_pn, uint32_t **rmf_pn_replays);
};

/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled:
 * @set_cfg_packet_log_enabled:
 * @cfg_attach:
 * @vdev_rx_set_intrabss_fwd:
 * @is_rx_fwd_disabled:
 * @tx_set_is_mgmt_over_wmi_enabled:
 * @is_high_latency:
 * @set_flow_control_parameters:
 * @set_flow_steering:
 * @set_ptp_rx_opt_enabled:
 */
struct cdp_cfg_ops {
    void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
            uint8_t disable_rx_fwd);
    void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
            uint8_t val);
    struct cdp_cfg *(*cfg_attach)(qdf_device_t osdev, void *cfg_param);
    void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
    uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
    void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
    int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
    void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
            void *param);
    void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
    void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
    QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
            tx_pause_callback);
    void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
    void (*dump_flow_pool_info)(void *);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
    int (*register_tx_flow_control)(uint8_t vdev_id,
            ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
            ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
    int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
    void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
    bool (*get_tx_resource)(uint8_t sta_id,
            unsigned int low_watermark,
            unsigned int high_watermark_offset);
    int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
    void (*vdev_flush)(struct cdp_vdev *vdev);
    void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
    void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 * @ipa_set_uc_tx_partition_base:
 * @ipa_uc_get_share_stats:
 * @ipa_uc_set_quota:
 * @ipa_enable_autonomy:
 * @ipa_disable_autonomy:
 * @ipa_setup:
 * @ipa_cleanup:
 * @ipa_setup_iface:
 * @ipa_cleanup_iface:
 * @ipa_enable_pipes:
 * @ipa_disable_pipes:
 * @ipa_set_perf_level:
 */
struct cdp_ipa_ops {
    QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
    QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
    QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
            bool is_tx);
    QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
    QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
            void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
            void *usr_ctxt);
    QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
    qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
    void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
            uint32_t value);
#ifdef FEATURE_METERING
    QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
            uint8_t reset_stats);
    QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
            uint64_t quota_bytes);
#endif
    QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
    QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
    QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
            void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
            uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
            uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
    QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
            uint32_t rx_pipe_handle);
    QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
            qdf_ipa_client_type_t prod_client,
            qdf_ipa_client_type_t cons_client,
            uint8_t session_id, bool is_ipv6_enabled);
    QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
    QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
    QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
    QDF_STATUS (*ipa_set_perf_level)(int client,
            uint32_t max_supported_bw_mbps);
};
#endif /* IPA_OFFLOAD */

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
    QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
    QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
    void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
            struct ol_txrx_ocb_set_chan ocb_set_chan);
    struct ol_txrx_ocb_chan_info *
        (*get_ocb_chan_info)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @change_peer_state:
 * @peer_get_ref_by_addr:
 * @peer_release_ref:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @last_assoc_received:
 * @last_disassoc_received:
 * @last_deauth_received:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 * @peer_detach_force_delete:
 */
struct cdp_peer_ops {
    QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
            struct ol_txrx_desc_type *sta_desc);
    QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
    QDF_STATUS (*change_peer_state)(uint8_t sta_id,
            enum ol_txrx_peer_state sta_state,
            bool roam_synch_in_progress);
    void *(*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
            u8 *peer_addr, uint8_t *peer_id,
            enum peer_debug_id_type debug_id);
    void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
    void *(*find_peer_by_addr)(struct cdp_pdev *pdev,
            uint8_t *peer_addr, uint8_t *peer_id);
    void *(*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
            struct cdp_vdev *vdev,
            uint8_t *peer_addr, uint8_t *peer_id);
    uint16_t (*local_peer_id)(void *peer);
    void *(*peer_find_by_local_id)(struct cdp_pdev *pdev,
            uint8_t local_peer_id);
    QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
            uint8_t *peer_addr,
            enum ol_txrx_peer_state state);
    QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
    struct cdp_vdev *(*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
            uint8_t sta_id);
    QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
    uint8_t *(*peer_get_peer_mac_addr)(void *peer);
    int (*get_peer_state)(void *peer);
    struct cdp_vdev *(*get_vdev_for_peer)(void *peer);
    int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
            int16_t peer_num_delta);
    void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
            ol_txrx_vdev_peer_remove_cb callback,
            void *callback_context, bool remove_last_peer);
    void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
            ol_txrx_vdev_peer_remove_cb callback,
            void *callback_context);
    void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
    void (*add_last_real_peer)(struct cdp_pdev *pdev,
            struct cdp_vdev *vdev, uint8_t *peer_id);
    qdf_time_t *(*last_assoc_received)(void *peer);
    qdf_time_t *(*last_disassoc_received)(void *peer);
    qdf_time_t *(*last_deauth_received)(void *peer);
    bool (*is_vdev_restore_last_peer)(void *peer);
    void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
            uint8_t *peer_id, bool restore_last_peer);
    void (*peer_detach_force_delete)(void *peer);
};
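
/*
 * Illustrative lookup pattern (a sketch, not a prescribed flow): MCL callers
 * commonly resolve a MAC address to a peer and then to its vdev through this
 * table. "peer_ops", "pdev", "mac_addr" and "vdev" are hypothetical
 * caller-side names.
 *
 *      uint8_t peer_id;
 *      void *peer;
 *
 *      peer = peer_ops->find_peer_by_addr(pdev, mac_addr, &peer_id);
 *      if (peer)
 *              vdev = peer_ops->get_vdev_for_peer(peer);
 */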

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
    void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
            uint8_t *dutycycle_level);
    void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};

/**
 * struct cdp_mob_stats_ops - mcl mobile stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
    void (*clear_stats)(uint16_t bitmap);
    int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* ifndef CONFIG_WIN */

struct cdp_ops {
    struct cdp_cmn_ops *cmn_drv_ops;
    struct cdp_ctrl_ops *ctrl_ops;
    struct cdp_me_ops *me_ops;
    struct cdp_mon_ops *mon_ops;
    struct cdp_host_stats_ops *host_stats_ops;
    struct cdp_wds_ops *wds_ops;
    struct cdp_raw_ops *raw_ops;
    struct cdp_pflow_ops *pflow_ops;
#ifndef CONFIG_WIN
    struct cdp_misc_ops *misc_ops;
    struct cdp_cfg_ops *cfg_ops;
    struct cdp_flowctl_ops *flowctl_ops;
    struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
    struct cdp_ipa_ops *ipa_ops;
#endif
    struct cdp_bus_ops *bus_ops;
    struct cdp_ocb_ops *ocb_ops;
    struct cdp_peer_ops *peer_ops;
    struct cdp_throttle_ops *throttle_ops;
    struct cdp_mob_stats_ops *mob_stats_ops;
    struct cdp_tx_delay_ops *delay_ops;
    struct cdp_pmf_ops *pmf_ops;
#endif /* ifndef CONFIG_WIN */
};
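
/*
 * Registration/dispatch sketch (illustrative only): a data-path
 * implementation exports one instance of struct cdp_ops whose members point
 * at its per-category tables, and the converged layer dispatches through it.
 * The dp_* names below are hypothetical; only the shape of the table is
 * defined by this header.
 *
 *      static struct cdp_cmn_ops dp_cmn_ops = {
 *              .txrx_soc_attach_target = dp_soc_attach_target,
 *              .txrx_pdev_attach = dp_pdev_attach,
 *              ...
 *      };
 *
 *      static struct cdp_ops dp_ops = {
 *              .cmn_drv_ops = &dp_cmn_ops,
 *              .ctrl_ops = &dp_ctrl_ops,
 *              ...
 *      };
 *
 * Assuming the usual cdp_soc_t layout where the soc handle carries an "ops"
 * pointer, callers then dispatch as, e.g.:
 *
 *      soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 */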

#endif