/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif
/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1
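/*
 * Illustrative sketch (not part of the original header): how a caller
 * might combine the peer_delete bitmap values above.  The helper name
 * and the 'suppress_unmap_timer' flag are hypothetical; the resulting
 * bitmap is what would be handed to the txrx_peer_delete() op declared
 * in struct cdp_cmn_ops below.
 */
static inline uint32_t
example_build_peer_delete_bitmap(bool suppress_unmap_timer)
{
	uint32_t bitmap = CDP_PEER_DELETE_NO_SPECIAL;

	if (suppress_unmap_timer)
		bitmap |= CDP_PEER_DO_NOT_START_UNMAP_TIMER;

	return bitmap;
}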
/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};
/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	int (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		uint8_t vdev_id, enum wlan_op_mode op_mode);
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		void *cb_context);
	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
		struct cdp_ctrl_objmgr_peer *ctrl_peer);
	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags);
	void (*txrx_peer_del_ast)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		uint8_t *mac_addr, uint32_t flags);
	void *(*txrx_peer_ast_hash_find)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr);
	uint8_t (*txrx_peer_ast_get_pdev_id)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	uint8_t (*txrx_peer_ast_get_next_hop)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	void (*txrx_peer_ast_set_type)
		(ol_txrx_soc_handle soc, void *ast_hdl,
		enum cdp_txrx_ast_entry_type type);
#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
	void (*txrx_peer_ast_set_cp_ctx)(ol_txrx_soc_handle soc,
			void *ast_entry,
			void *cp_ctx);
	void * (*txrx_peer_ast_get_cp_ctx)(ol_txrx_soc_handle soc,
			void *ast_entry);
	bool (*txrx_peer_ast_get_wmi_sent)(ol_txrx_soc_handle soc,
			void *ast_entry);
	void (*txrx_peer_ast_free_entry)(ol_txrx_soc_handle soc,
			void *ast_entry);
#endif
	enum cdp_txrx_ast_entry_type (*txrx_peer_ast_get_type)
		(ol_txrx_soc_handle soc, void *ast_hdl);
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
	int (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
			uint8_t smart_monitor);
	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
			int16_t chan_noise_floor);
	void (*txrx_set_nac)(struct cdp_peer *peer);
	void (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		uint32_t peer_id, uint8_t *peer_mac);
	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
	void (*txrx_ath_getstats)(void *pdev,
			struct cdp_dev_stats *stats, uint8_t type);
	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
			u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
	void (*txrx_if_mgmt_drain)(void *ni, int force);
	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);
	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
			void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
			struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
			qdf_nbuf_t tx_mgmt_frm, uint8_t type, uint8_t use_6mbps,
			uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
			ol_txrx_mgmt_tx_cb download_cb,
			ol_txrx_mgmt_tx_cb ota_ack_cb,
			void *ctxt);
	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
			ol_txrx_data_tx_cb callback, void *ctxt);
	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			int max_subfrms_amsdu);
	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req,
			bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
			uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
	 * vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
			uint8_t vdev_id);
	void (*txrx_soc_detach)(void *soc);
	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
			int status);
	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
			uint16_t tid, uint16_t batimeout,
			uint16_t buffersize,
			uint16_t startseqnum);
	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
			uint8_t *dialogtoken, uint16_t *statuscode,
			uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(void *peer_handle,
			int tid, uint16_t reasoncode);
	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @peer_handle: Peer handle
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on success, 1 on failure
	 */
	int (*delba_tx_completion)(void *peer_handle,
			uint8_t tid, int status);
	void (*set_addba_response)(void *peer_handle,
			uint8_t tid, uint16_t statuscode);
	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
			uint16_t peer_id, uint8_t *mac_addr);
	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
			uint8_t map_id);
	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
			uint8_t tos, uint8_t tid);
	QDF_STATUS (*txrx_stats_request)(struct cdp_vdev *vdev,
			struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
			enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
			struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
			uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
			struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
			void *dp_txrx_hdl);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
			void *dp_txrx_handle);
	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr, void *vdev_hdl);
	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
			void *vdev_hdl);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
			uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
			uint8_t ac, uint32_t *value);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
			uint32_t num_peers,
			bool peer_map_unmap_v2);
	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
			struct cdp_ctrl_objmgr_pdev *ctrl_pdev);
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return the RX handler and
	 * osif vdev used to deliver packets to the stack.
	 * @vdev: vdev handle
	 * @stack_fn: filled with the function pointer used to deliver RX
	 * packets to the stack
	 * @osif_vdev: filled with the osif vdev that RX packets are
	 * delivered to
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
		(struct cdp_vdev *vdev,
		ol_txrx_rx_fp *stack_fn,
		ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
		enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
			enum cdp_capabilities dp_caps);
};
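/*
 * Illustrative sketch (not part of the original header), assuming the
 * caller already holds the soc-level cdp_cmn_ops table declared above:
 * a minimal vdev bring-up through the common ops.  The helper name and
 * all argument names are placeholders, and every op is NULL-checked
 * because an implementation may leave any of them unpopulated.
 */
static inline struct cdp_vdev *
example_vdev_bringup(struct cdp_cmn_ops *ops, struct cdp_pdev *pdev,
		     uint8_t *mac, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	struct cdp_vdev *vdev;

	if (!ops || !ops->txrx_vdev_attach)
		return NULL;

	/* create the data-path vdev object */
	vdev = ops->txrx_vdev_attach(pdev, mac, vdev_id, op_mode);
	if (!vdev)
		return NULL;

	/*
	 * The OS shim would typically follow up with
	 * ops->txrx_vdev_register() (passing its osif handle, the objmgr
	 * vdev and an ol_txrx_ops callback table) and later create peers
	 * via ops->txrx_peer_create()/txrx_peer_setup().
	 */
	return vdev;
}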
struct cdp_ctrl_ops {
	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
				struct cdp_pdev *pdev,
				uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
				struct cdp_vdev *vdev,
				uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * sending and receiving packets. It works like open AUTH mode: HW will
	 * create all packets as non-encrypted frames because no key is
	 * installed. For rx fragmented frames, it bypasses all the rx
	 * defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */
	void
		(*txrx_set_safemode)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When this flag is set, all unencrypted frames
	 * received over a secure connection will be discarded.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
				struct cdp_vdev *vdev,
				u_int32_t val);
	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
				struct cdp_vdev *vdev,
				enum htt_cmn_pkt_type val);
	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within the rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				u_int32_t authorize);
	bool
		(*txrx_set_inact_params)(struct cdp_pdev *pdev,
				u_int16_t inact_check_interval,
				u_int16_t inact_normal,
				u_int16_t inact_overload);
	bool
		(*txrx_start_inact_timer)(
				struct cdp_pdev *pdev,
				bool enable);
	/**
	 * @brief Set the overload status of the radio
	 * @details
	 * Set the overload status of the radio, updating the inactivity
	 * threshold and inactivity count for each node.
	 *
	 * @param pdev - the data physical device object
	 * @param overload - whether the radio is overloaded or not
	 */
	void (*txrx_set_overload)(
			struct cdp_pdev *pdev,
			bool overload);
	/**
	 * @brief Check the inactivity status of the peer/node
	 *
	 * @param peer - pointer to the node's object
	 * @return true if the node is inactive; otherwise return false
	 */
	bool
		(*txrx_peer_is_inact)(void *peer);
	/**
	 * @brief Mark inactivity status of the peer/node
	 * @details
	 * If it becomes active, reset inactivity count to reload value;
	 * if the inactivity status changed, notify umac band steering.
	 *
	 * @param peer - pointer to the node's object
	 * @param inactive - whether the node is inactive or not
	 */
	void (*txrx_mark_peer_inact)(
			void *peer,
			bool inactive);
	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
	/**
	 * @brief set the mesh rx filter
	 * @details
	 * Packets are dropped based on the bits enabled in the filter.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
			enum cdp_vdev_param_type param, uint32_t val);
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value in the range 1 to 4
	 */
	void (*txrx_set_pdev_reo_dest)(
			struct cdp_pdev *pdev,
			enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
			struct cdp_pdev *pdev);
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
			uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
			uint8_t subtype, uint8_t tx_power);
	void (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
			enum cdp_pdev_param_type type, uint8_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
			enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
			uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
			char *macaddr,
			uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);
};
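/*
 * Illustrative sketch (not part of the original header): driving a vdev
 * into "open" operation as described by the txrx_set_safemode() and
 * txrx_set_drop_unenc() documentation above.  'ctrl' and 'vdev' are
 * assumed to be handles the caller already owns; the helper name is
 * hypothetical.
 */
static inline void
example_vdev_set_open_mode(struct cdp_ctrl_ops *ctrl, struct cdp_vdev *vdev,
			   bool open)
{
	if (!ctrl || !vdev)
		return;

	/* bypass encrypt/decrypt when no keys are installed */
	if (ctrl->txrx_set_safemode)
		ctrl->txrx_set_safemode(vdev, open ? 1 : 0);

	/* in open mode, do not drop unencrypted rx frames */
	if (ctrl->txrx_set_drop_unenc)
		ctrl->txrx_set_drop_unenc(vdev, open ? 0 : 1);
}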
struct cdp_me_ops {
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);
	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
		(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
			qdf_nbuf_t wbuf, u_int8_t newmac[][6],
			uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify of a mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device that received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspection
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
			u_int8_t vdev_id, qdf_nbuf_t msdu);
};
struct cdp_mon_ops {
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	int (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
	/* HK advance monitor filter support */
	int (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
};
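/*
 * Illustrative sketch (not part of the original header): enabling or
 * disabling all three monitor-mode data filters on a pdev.  'mon' and
 * 'pdev' are assumed handles and the helper name is hypothetical; the
 * reset call on disable simply mirrors the txrx_reset_monitor_mode() op
 * above.
 */
static inline void
example_monitor_filter_all(struct cdp_mon_ops *mon, struct cdp_pdev *pdev,
			   u_int8_t enable)
{
	if (!mon || !pdev)
		return;

	if (mon->txrx_monitor_set_filter_ucast_data)
		mon->txrx_monitor_set_filter_ucast_data(pdev, enable);
	if (mon->txrx_monitor_set_filter_mcast_data)
		mon->txrx_monitor_set_filter_mcast_data(pdev, enable);
	if (mon->txrx_monitor_set_filter_non_data)
		mon->txrx_monitor_set_filter_non_data(pdev, enable);

	if (!enable && mon->txrx_reset_monitor_mode)
		(void)mon->txrx_reset_monitor_mode(pdev);
}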
struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req);
	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
			void *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
			uint32_t *stats_base, uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);
	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);
	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);
	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
			struct ol_txrx_stats_req *req);
	void
		(*print_lro_stats)(struct cdp_vdev *vdev);
	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);
	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
				uint32_t cap);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
				uint32_t data_len);
	void
		(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
				uint16_t stats_id);
	struct cdp_peer_stats *
		(*txrx_get_peer_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_stats)(struct cdp_peer *peer);
	int
		(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
				bool is_aggregate);
	int
		(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
				void *data, uint32_t len,
				uint32_t stats_id);
	int
		(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
				void *buffer);
	void
		(*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf,
				uint16_t stats_id);
};
struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
				u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
				int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};
struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
			qdf_nbuf_t *pnbuf,
			struct cdp_raw_ast *raw_ast);
};
#ifdef CONFIG_WIN
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
			enum _ol_ath_param_t, uint32_t, void *);
};
#endif /* CONFIG_WIN */
#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11
/**
 * struct cdp_lro_hash_config - set rx_offld (LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
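/*
 * Illustrative sketch (not part of the original header): how the control
 * layer might fill cdp_lro_hash_config before passing it to the
 * lro_hash_config() callback in struct ol_if_ops below.  The TCP flag
 * pattern/mask values and the caller-supplied seed arrays are
 * assumptions made purely for illustration.
 */
static inline void
example_fill_lro_hash_config(struct cdp_lro_hash_config *cfg,
			     const uint32_t *seed_ipv4,
			     const uint32_t *seed_ipv6)
{
	int i;

	cfg->lro_enable = 1;
	cfg->tcp_flag = 0;          /* placeholder flag pattern */
	cfg->tcp_flag_mask = 0x1ff; /* placeholder mask over the 9 flag bits */

	for (i = 0; i < LRO_IPV4_SEED_ARR_SZ; i++)
		cfg->toeplitz_hash_ipv4[i] = seed_ipv4[i];
	for (i = 0; i < LRO_IPV6_SEED_ARR_SZ; i++)
		cfg->toeplitz_hash_ipv6[i] = seed_ipv6[i];
}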
struct ol_if_ops {
	void
		(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
			uint8_t *peer_macaddr, uint8_t vdev_id,
			bool hash_based, uint8_t ring_num);
	QDF_STATUS
		(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
			uint8_t vdev_id, uint8_t *peer_mac,
			qdf_dma_addr_t hw_qdesc, int tid,
			uint16_t queue_num,
			uint8_t ba_window_size_valid,
			uint16_t ba_window_size);
	QDF_STATUS
		(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
			uint8_t vdev_id, uint8_t *peer_macaddr,
			uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t vdev_id,
			uint8_t *peer_macaddr);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *ol_soc_handle,
			const uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
			uint8_t *dest_macaddr, uint8_t *peer_macaddr,
			uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
			uint8_t *wds_macaddr);
#if defined(FEATURE_AST) && defined(AST_HKV1_WORKAROUND)
	void (*peer_del_wds_cp_ctx)(void *cp_ctx);
#endif
	QDF_STATUS
		(*lro_hash_config)(struct wlan_objmgr_psoc *ctrl_psoc,
			struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
			uint8_t type);
#ifdef CONFIG_WIN
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(void *ol_soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
			uint8_t vdev_id, uint8_t *peer_mac_addr,
			enum cdp_txrx_ast_entry_type peer_type,
			uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(void *ol_soc_handle, uint16_t peer_id);
	int (*get_dp_cfg_param)(void *ol_soc_handle, enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(void *ol_soc_handle,
			uint16_t vdev_id, void *wh);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
			qdf_nbuf_t nbuf,
			uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);
	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
			u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
			u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid,
			char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
			u_int8_t vdev_id, enum cdp_nac_param_cmd cmd, char *bssid);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);
	/**
	 * send_delba() - Send delba to peer
	 * @pdev_handle: Dp pdev handle
	 * @ctrl_peer: Peer handle
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(void *pdev_handle, void *ctrl_peer,
			uint8_t *peer_macaddr, uint8_t tid, void *vdev_handle,
			uint8_t reason_code);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
#ifndef CONFIG_WIN
/* From here MCL specific OPs */
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @set_wmm_param:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
			uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
			int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
			int level, int tput_thresh, int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
			enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
			uint64_t *fwd_tx_packets, uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
};
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev, uint32_t *queue_delay_microsec,
			uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
			uint16_t *out_packet_count,
			uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
			uint32_t interval);
};
/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled:
 * @set_cfg_packet_log_enabled:
 * @cfg_attach:
 * @vdev_rx_set_intrabss_fwd:
 * @get_opmode:
 * @is_rx_fwd_disabled:
 * @tx_set_is_mgmt_over_wmi_enabled:
 * @is_high_latency:
 * @set_flow_control_parameters:
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
			uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
			uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
			void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
};
/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
			struct cdp_pdev *pdev,
			uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
			struct cdp_pdev *pdev,
			uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
			tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(void *);
};
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
			tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(u8 vdev_id, u8 chan);
	int (*set_vdev_os_queue_status)(u8 vdev_id,
			enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
			ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
			ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(uint8_t sta_id,
			unsigned int low_watermark,
			unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
};
#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
			bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
			void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
			void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
			uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
			uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
			uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
			uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle,
			bool is_smmu_enabled, qdf_ipa_sys_connect_params_t *sys_in);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
			uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
			uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
			qdf_ipa_client_type_t prod_client,
			qdf_ipa_client_type_t cons_client,
			uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
			uint32_t max_supported_bw_mbps);
};
#endif
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};
/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
			struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
		(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};
/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @cfg_attach:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
			struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev, uint8_t sta_id);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
			enum ol_txrx_peer_state sta_state,
			bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
			u8 *peer_addr, uint8_t *peer_id,
			enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev,
			uint8_t *peer_addr, uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
			uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
			uint8_t *peer_addr,
			enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_sta_id)(struct cdp_pdev *pdev,
			uint8_t sta_id);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
			int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context, bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
			ol_txrx_vdev_peer_remove_cb callback,
			void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
			struct cdp_vdev *vdev, uint8_t *peer_id);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *peer,
			uint8_t *peer_id, bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
};
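/*
 * Illustrative sketch (not part of the original header): a typical MCL
 * control-path sequence that looks a peer up by MAC address and then
 * moves it to the authorized state once keys are installed.  'peer_ops',
 * 'pdev' and 'peer_mac' are assumed handles, the helper name is
 * hypothetical, and OL_TXRX_PEER_STATE_AUTH is assumed to come from
 * enum ol_txrx_peer_state in cdp_txrx_cmn_struct.h.
 */
static inline QDF_STATUS
example_authorize_peer(struct cdp_peer_ops *peer_ops, struct cdp_pdev *pdev,
		       uint8_t *peer_mac)
{
	uint8_t peer_id;

	if (!peer_ops || !peer_ops->find_peer_by_addr ||
	    !peer_ops->peer_state_update)
		return QDF_STATUS_E_INVAL;

	/* make sure the data path already knows about this peer */
	if (!peer_ops->find_peer_by_addr(pdev, peer_mac, &peer_id))
		return QDF_STATUS_E_FAILURE;

	/* promote the peer to the authorized (data-capable) state */
	return peer_ops->peer_state_update(pdev, peer_mac,
					   OL_TXRX_PEER_STATE_AUTH);
}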
/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
			uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};
/**
 * struct cdp_mob_stats_ops - mcl mobile stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
	void (*clear_stats)(uint16_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};
#endif /* CONFIG_WIN */
#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl receive offload ops
 * @register_rx_offld_flush_cb:
 * @deregister_rx_offld_flush_cb:
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifndef CONFIG_WIN
	struct cdp_misc_ops *misc_ops;
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
	struct cdp_bus_ops *bus_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_throttle_ops *throttle_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_tx_delay_ops *delay_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif /* CONFIG_WIN */
};
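/*
 * Illustrative sketch (not part of the original header): struct cdp_ops
 * is the single table a soc exposes, so callers normally dereference the
 * relevant per-category pointer and NULL-check the individual op before
 * dispatching.  'cdp' is an assumed handle to the table registered at
 * soc attach time; the helper name is hypothetical.
 */
static inline int
example_cdp_soc_attach_target(struct cdp_ops *cdp, ol_txrx_soc_handle soc)
{
	if (!cdp || !cdp->cmn_drv_ops ||
	    !cdp->cmn_drv_ops->txrx_soc_attach_target)
		return 0;

	return cdp->cmn_drv_ops->txrx_soc_attach_target(soc);
}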
#endif