/*
 * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"
#include <wmi_unified_api.h>
#include <wdi_event_api.h>

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL 0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1

struct hif_opaque_softc;

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};

/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id);
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_soc_t *soc, uint8_t pdev_id, uint8_t *mac,
		uint8_t vdev_id, enum wlan_op_mode op_mode,
		enum wlan_op_subtype subtype);
	QDF_STATUS
	(*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
		ol_txrx_vdev_delete_cb callback,
		void *cb_context);
	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
		qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void
	(*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
		int force);
	QDF_STATUS
	(*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
		int force);
	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @soc: soc dp handle
	 * @pdev_id: id of Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: QDF_STATUS
	 */
	QDF_STATUS
	(*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
		int force);
	void *(*txrx_peer_create)
		(ol_txrx_soc_handle soc, uint8_t vdev_id,
		uint8_t *peer_mac_addr);
	QDF_STATUS
	(*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		uint8_t *peer_mac);
	QDF_STATUS
	(*txrx_cp_peer_del_response)
		(ol_txrx_soc_handle soc, uint8_t vdev_id,
		uint8_t *peer_mac_addr);
	QDF_STATUS
	(*txrx_peer_teardown)
		(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
		uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		uint32_t flags);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
		uint8_t *mac_addr, uint32_t flags);
	bool (*txrx_peer_get_ast_info_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		struct cdp_ast_entry_info *ast_entry_info);
	bool (*txrx_peer_get_ast_info_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		uint8_t pdev_id,
		struct cdp_ast_entry_info *ast_entry_info);
	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		txrx_ast_free_cb callback,
		void *cookie);
	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		uint8_t pdev_id,
		txrx_ast_free_cb callback,
		void *cookie);
	QDF_STATUS
	(*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac, uint32_t bitmap);
	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t smart_monitor);
	void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac,
		QDF_STATUS(*delete_cb)(
			uint8_t vdev_id,
			uint32_t peerid_cnt,
			uint16_t *peerid_list),
		uint32_t bitmap);
	void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		ol_txrx_peer_unmap_sync_cb
		peer_unmap_sync);
	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev);
	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
		int16_t chan_noise_floor);
	void
	(*txrx_set_nac)(ol_txrx_soc_handle soc, uint8_t vdev_id,
		uint8_t *peer_mac);
	/**
	 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture
	 * @pdev: data path pdev handle
	 * @val: value of pdev_tx_capture
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
	QDF_STATUS
	(*txrx_get_peer_mac_from_peer_id)
		(struct cdp_soc_t *cdp_soc,
		uint32_t peer_id, uint8_t *peer_mac);
	void
	(*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
	void
	(*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
		struct cdp_dev_stats *stats, uint8_t type);
	QDF_STATUS
	(*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
		u_int8_t *mem_status,
		u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	QDF_STATUS
	(*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
		int force);
	QDF_STATUS
	(*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint32_t chan_mhz);
	QDF_STATUS
	(*txrx_set_privacy_filters)
		(struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
		uint32_t num);
	uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);
	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	QDF_STATUS
	(*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
		ol_osif_vdev_handle osif_vdev,
		struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
		qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
		qdf_nbuf_t tx_mgmt_frm, uint8_t type,
		uint8_t use_6mbps, uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	QDF_STATUS
	(*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t type,
		ol_txrx_mgmt_tx_cb download_cb,
		ol_txrx_mgmt_tx_cb ota_ack_cb,
		void *ctxt);
	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
		ol_txrx_data_tx_cb callback, void *ctxt);
	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 ********************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
		int max_subfrms_ampdu,
		int max_subfrms_amsdu);
	A_STATUS
	(*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
		struct ol_txrx_stats_req *req,
		bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
		int debug_specs);
	QDF_STATUS
	(*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
		uint8_t vdev_id);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
		uint8_t vdev_id);
	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @soc: datapath soc handle
	 * @pdev_id: pdev id
	 *
	 * Return: vdev_id
	 */
	uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	void (*txrx_soc_detach)(struct cdp_soc_t *soc);
	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque DP handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(struct cdp_soc_t *soc);
	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque DP handle
	 * @ctrl_psoc: Opaque CP handle
	 * @htchdl: Opaque htc handle
	 * @hifhdl: Opaque hif handle
	 *
	 * Return: DP soc handle on success, NULL on failure
	 */
	void *(*txrx_soc_init)(struct cdp_soc_t *soc,
		struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		struct hif_opaque_softc *hif_handle,
		HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
		struct ol_if_ops *ol_ops, uint16_t device_id);
	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque DP handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);
	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque DP handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
	int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
		uint8_t *peer_mac,
		uint16_t vdev_id, uint8_t tid,
		int status);
	int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
		uint8_t *peer_mac,
		uint16_t vdev_id,
		uint8_t dialogtoken,
		uint16_t tid, uint16_t batimeout,
		uint16_t buffersize,
		uint16_t startseqnum);
	QDF_STATUS
	(*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
		uint8_t *peer_mac,
		uint16_t vdev_id, uint8_t tid,
		uint8_t *dialogtoken, uint16_t *statuscode,
		uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		uint16_t vdev_id, int tid, uint16_t reasoncode);
	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @cdp_soc: soc handle
	 * @peer_mac: Peer mac address
	 * @vdev_id: vdev id
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		uint16_t vdev_id,
		uint8_t tid, int status);
	QDF_STATUS
	(*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
		uint16_t vdev_id, uint8_t tid,
		uint16_t statuscode);
	QDF_STATUS
	(*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
		uint8_t vdev_id, uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
	void (*flush_cache_rx_queue)(void);
	QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
		uint8_t pdev_id,
		uint8_t map_id,
		uint8_t tos, uint8_t tid);
	void (*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val);
	void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid);
	QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
		uint8_t vdev_id,
		struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
		enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
	void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
	QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
		uint8_t vdev_id, uint8_t *peermac,
		enum cdp_sec_type sec_type,
		uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
		struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
	void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
		void *dp_hdl);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
		void *dp_txrx_handle);
	QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
		uint32_t lmac_id);
	QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
		uint8_t pdev_id, bool is_pdev_down);
	QDF_STATUS (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
		uint8_t *peer_macaddr, uint8_t vdev_id);
	QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
		uint8_t vdev_id);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
		uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
		uint8_t ac, uint32_t *value);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
		uint32_t num_peers,
		uint32_t max_ast_index,
		bool peer_map_unmap_v2);
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return function, osif vdev
	 * to deliver pkt to stack.
	 * @soc: datapath soc handle
	 * @vdev_id: vdev id
	 * @stack_fn: pointer to function pointer to deliver RX pkt to stack
	 * @osif_vdev: pointer to osif vdev to deliver RX packet to
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
		(ol_txrx_soc_handle soc,
		uint8_t vdev_id,
		ol_txrx_rx_fp *stack_fn,
		ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
		enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
		enum cdp_capabilities dp_caps);
	void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, void *ctx);
	void *(*get_rate_stats_ctx)(struct cdp_soc_t *soc);
	QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		void *buf);
	QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_pdev_tidmap_prty)(struct cdp_pdev *pdev, uint8_t prty);
	QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_vdev_tidmap_prty)(struct cdp_vdev *vdev, uint8_t prty);
	QDF_STATUS (*set_vdev_tidmap_tbl_id)(struct cdp_vdev *vdev,
		uint8_t mapid);
#ifdef QCA_MULTIPASS_SUPPORT
	QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint16_t vlan_id, uint16_t group_key);
#endif
};
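
/*
 * Illustrative usage sketch (not a definitive part of this API): assuming the
 * caller has already resolved the registered struct cdp_cmn_ops table and a
 * soc handle (how those are obtained is outside this header and is an
 * assumption here), a peer create/delete sequence could look roughly like:
 *
 *	uint8_t peer_mac[6] = {0};
 *	void *peer = cmn_ops->txrx_peer_create(soc, vdev_id, peer_mac);
 *
 *	if (peer)
 *		cmn_ops->txrx_peer_delete(soc, vdev_id, peer_mac,
 *					  CDP_PEER_DELETE_NO_SPECIAL);
 *
 * Only the member names and the CDP_PEER_DELETE_NO_SPECIAL bitmap value come
 * from this header; soc, vdev_id and peer_mac are placeholders.
 */
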
struct cdp_ctrl_ops {
	int
	(*txrx_mempools_attach)(void *ctrl_pdev);
	int
	(*txrx_set_filter_neighbour_peers)(
		struct cdp_pdev *pdev,
		uint32_t val);
	int
	(*txrx_update_filter_neighbour_peers)(
		struct cdp_vdev *vdev,
		uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * sending and receiving packets. It works like open AUTH mode; HW will
	 * create all packets as non-encrypt frames because no key is installed.
	 * For rx fragmented frames, it bypasses all the rx defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */
	void
	(*txrx_set_safemode)(
		struct cdp_vdev *vdev,
		u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When this flag is set, all the unencrypted frames
	 * received over a secure connection will be discarded.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
	(*txrx_set_drop_unenc)(
		struct cdp_vdev *vdev,
		u_int32_t val);
	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
	(*txrx_set_tx_encap_type)(
		struct cdp_vdev *vdev,
		enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
	(*txrx_set_vdev_rx_decap_type)(
		struct cdp_vdev *vdev,
		enum htt_cmn_pkt_type val);
	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
	(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize peer
	 *
	 * @return none
	 */
	void
	(*txrx_peer_authorize)(struct cdp_peer *peer,
		u_int32_t authorize);
	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
	/**
	 * @brief setting mesh rx filter
	 * @details
	 * Based on the bits enabled in the filter, packets have to be dropped.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
		enum cdp_vdev_param_type param, uint32_t val);
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value ranges between 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
		struct cdp_pdev *pdev,
		enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring no on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
		struct cdp_pdev *pdev);
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
		uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
		uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
		uint8_t subtype, uint8_t tx_power);
	/**
	 * txrx_set_pdev_param() - callback to set pdev parameter
	 * @pdev: data path pdev handle
	 * @type: pdev parameter type
	 * @val: value of the pdev parameter
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
		enum cdp_pdev_param_type type,
		uint32_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
		uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
		char *macaddr,
		uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
		bool is_unicast, uint32_t *key);
	uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev,
		enum cdp_vdev_param_type param);
	int (*enable_peer_based_pktlog)(struct cdp_pdev
		*txrx_pdev_handle, char *macaddr, uint8_t enb_dsb);
	void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
		struct cdp_pdev *txrx_pdev_handle,
		uint32_t protocol_mask, uint16_t protocol_type,
		uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	void (*txrx_dump_pdev_rx_protocol_tag_stats)(
		struct cdp_pdev *txrx_pdev_handle,
		uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	QDF_STATUS (*txrx_set_rx_flow_tag)(
		struct cdp_pdev *txrx_pdev_handle,
		struct cdp_rx_flow_info *flow_info);
	QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
		struct cdp_pdev *txrx_pdev_handle,
		struct cdp_rx_flow_info *flow_info);
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	void (*txrx_peer_set_vlan_id)(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t *peer_mac,
		uint16_t vlan_id);
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
		struct cdp_pdev *txrx_pdev_handle,
		bool is_rx_pkt_cap_enable, bool is_tx_pkt_cap_enable,
		uint8_t *peer_mac);
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};
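
/*
 * Illustrative usage sketch (assumption: ctrl_ops and vdev below stand for a
 * resolved struct cdp_ctrl_ops table and an opaque cdp_vdev handle, which
 * this header does not show how to obtain). Enabling safemode while leaving
 * unencrypted-frame drop disabled could look like:
 *
 *	ctrl_ops->txrx_set_safemode(vdev, 1);
 *	ctrl_ops->txrx_set_drop_unenc(vdev, 0);
 *
 * The values 1/0 are used purely to illustrate the u_int32_t flag arguments.
 */
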
struct cdp_me_ops {
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
		struct cdp_pdev *pdev,
		u_int16_t buf_count);
	u_int16_t
	(*tx_get_mcast_buf_allocated_marked)
		(struct cdp_pdev *pdev);
	void
	(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
	void
	(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
	uint16_t
	(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
		qdf_nbuf_t wbuf, u_int8_t newmac[][6],
		uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device that received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
		u_int8_t vdev_id, qdf_nbuf_t msdu);
};

struct cdp_mon_ops {
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
	void (*txrx_monitor_record_channel)
		(struct cdp_pdev *, int val);
	void (*txrx_deliver_tx_mgmt)
		(struct cdp_pdev *pdev, qdf_nbuf_t nbuf);
	void (*txrx_set_bsscolor)
		(struct cdp_pdev *pdev, uint8_t bsscolor);
};
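
/*
 * Illustrative usage sketch (assumption: mon_ops and pdev are a resolved
 * struct cdp_mon_ops table and an opaque cdp_pdev handle). Programming the
 * basic monitor filters and later resetting monitor mode could look like:
 *
 *	mon_ops->txrx_monitor_set_filter_ucast_data(pdev, 1);
 *	mon_ops->txrx_monitor_set_filter_mcast_data(pdev, 1);
 *	mon_ops->txrx_monitor_set_filter_non_data(pdev, 0);
 *	mon_ops->txrx_reset_monitor_mode(pdev);
 *
 * The 1/0 values are placeholders for the u_int8_t filter settings.
 */
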
#ifdef WLAN_FEATURE_PKT_CAPTURE
struct cdp_pktcapture_ops {
	void (*txrx_pktcapture_set_mode)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t mode);
	uint8_t (*txrx_pktcapture_get_mode)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	QDF_STATUS (*txrx_pktcapture_cb_register)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		void *context,
		QDF_STATUS(cb)(void *, qdf_nbuf_t));
	QDF_STATUS (*txrx_pktcapture_cb_deregister)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	QDF_STATUS (*txrx_pktcapture_mgmtpkt_process)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		struct mon_rx_status *txrx_status,
		qdf_nbuf_t nbuf, uint8_t status);
	void (*txrx_pktcapture_record_channel)
		(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		int chan_no);
};
#endif /* #ifdef WLAN_FEATURE_PKT_CAPTURE */

struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
		struct ol_txrx_stats_req *req);
	QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
		uint8_t vdev_id);
	QDF_STATUS
	(*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
		struct cdp_stats_extd *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param soc - the soc handle
	 * @param pdev_id - pdev_id of pdev
	 * @return - QDF_STATUS
	 */
	QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
		uint8_t pdev_id);
	QDF_STATUS
	(*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
		struct ol_txrx_stats_req *req);
	int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		uint8_t *addr, void *stats,
		uint32_t last_tx_rate_mcs,
		uint32_t stats_id);
	QDF_STATUS
	(*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
		uint8_t *addr,
		uint32_t cap, uint32_t copy_stats);
	QDF_STATUS
	(*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
		void *data,
		uint32_t data_len);
	QDF_STATUS
	(*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
		uint8_t pdev_id, void *data,
		uint16_t stats_id);
	QDF_STATUS
	(*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac,
		struct cdp_peer_stats *peer_stats);
	QDF_STATUS
	(*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		uint8_t *peer_mac);
	QDF_STATUS
	(*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
		uint8_t vdev_id, uint8_t *peer_mac);
	int
	(*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
		void *buf, bool is_aggregate);
	int
	(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
		void *data, uint32_t len,
		uint32_t stats_id);
	int
	(*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
		uint8_t vdev_id,
		wmi_host_vdev_extd_stats *buffer);
	QDF_STATUS
	(*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
		uint8_t vdev_id, void *buf,
		uint16_t stats_id);
	int
	(*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
		void *buf);
	QDF_STATUS
	(*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
		struct cdp_pdev_stats *buf);
	int
	(*txrx_get_ratekbps)(int preamb, int mcs,
		int htflag, int gintval);
	QDF_STATUS
	(*configure_rate_stats)(struct cdp_soc_t *soc, uint8_t val);
	QDF_STATUS
	(*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint8_t *peer_mac, void *stats,
		uint32_t last_tx_rate_mcs,
		uint32_t stats_id);
};

struct cdp_wds_ops {
	QDF_STATUS
	(*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
		u_int32_t val);
	QDF_STATUS
	(*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
		uint8_t vdev_id, uint8_t *peer_mac,
		int wds_tx_ucast, int wds_tx_mcast);
	int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
		uint32_t val);
};

struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_soc_t *soc, uint8_t vdev_id);
	QDF_STATUS
	(*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
		qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
};

#ifdef PEER_FLOW_CONTROL
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
		uint8_t pdev_id,
		enum _ol_ath_param_t,
		uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
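
/*
 * Illustrative initialization sketch for struct cdp_lro_hash_config. The TCP
 * flag value and mask below are assumptions picked only to show the 9-bit
 * wide bit-fields; real callers fill the toeplitz seed arrays with random
 * words rather than zeros:
 *
 *	struct cdp_lro_hash_config lro_cfg = {0};
 *	int i;
 *
 *	lro_cfg.lro_enable = 1;
 *	lro_cfg.tcp_flag = 0x010;
 *	lro_cfg.tcp_flag_mask = 0x1ff;
 *	for (i = 0; i < LRO_IPV4_SEED_ARR_SZ; i++)
 *		lro_cfg.toeplitz_hash_ipv4[i] = 0;
 *	for (i = 0; i < LRO_IPV6_SEED_ARR_SZ; i++)
 *		lro_cfg.toeplitz_hash_ipv6[i] = 0;
 */
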
struct ol_if_ops {
	void
	(*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		uint8_t pdev_id, uint8_t *peer_macaddr,
		uint8_t vdev_id,
		bool hash_based, uint8_t ring_num);
	QDF_STATUS
	(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		uint8_t pdev_id,
		uint8_t vdev_id, uint8_t *peer_mac,
		qdf_dma_addr_t hw_qdesc, int tid,
		uint16_t queue_num,
		uint8_t ba_window_size_valid,
		uint16_t ba_window_size);
	QDF_STATUS
	(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
		uint8_t pdev_id,
		uint8_t vdev_id, uint8_t *peer_macaddr,
		uint32_t tid_mask);
	int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t pdev_id,
		uint8_t *peer_mac,
		uint8_t *vdev_mac, enum wlan_op_mode opmode);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
		uint8_t vdev_id,
		uint8_t *peer_macaddr,
		const uint8_t *dest_macaddr,
		uint8_t *next_node_mac,
		uint32_t flags);
	int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
		uint8_t vdev_id,
		uint8_t *dest_macaddr,
		uint8_t *peer_macaddr,
		uint32_t flags);
	void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
		uint8_t vdev_id,
		uint8_t *wds_macaddr,
		uint8_t type);
	QDF_STATUS
	(*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
		struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
		uint8_t type);
#ifdef FEATURE_NAC_RSSI
	uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
		uint8_t pdev_id, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint16_t peer_id, uint16_t hw_peer_id,
		uint8_t vdev_id, uint8_t *peer_mac_addr,
		enum cdp_txrx_ast_entry_type peer_type,
		uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint16_t peer_id,
		uint8_t vdev_id);
	int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
		enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t pdev_id,
		struct cdp_rx_mic_err_info *info);
	bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t vdev_id, uint8_t *peer_mac_addr,
		qdf_nbuf_t nbuf,
		uint16_t hdr_space);
	uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t vdev_id, uint16_t freq);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t pdev_id,
		u_int8_t vdev_id,
		enum cdp_nac_param_cmd cmd, char *bssid,
		char *client_macaddr, uint8_t chan_num);
	int
	(*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t pdev_id, u_int8_t vdev_id,
		enum cdp_nac_param_cmd cmd,
		char *bssid, char *client_mac);
#endif
	int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint16_t pdev_id, uint8_t *peer_macaddr);
	/**
	 * send_delba() - Send delba to peer
	 * @psoc: Objmgr soc handle
	 * @vdev_id: dp vdev id
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 * @reason_code: reason code for the delba
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
		uint8_t *peer_macaddr, uint8_t tid,
		uint8_t reason_code);
	int
	(*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
		uint8_t vdev_id,
		uint8_t *dest_macaddr,
		uint8_t *peer_macaddr,
		uint32_t flags);
	bool (*is_roam_inprogress)(uint32_t vdev_id);
	enum QDF_GLOBAL_MODE (*get_con_mode)(void);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
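
/*
 * Illustrative sketch of how a control-path module might populate part of
 * struct ol_if_ops. The names wlan_send_delba and wlan_ol_if_ops below are
 * hypothetical; only the member name and its signature come from this header:
 *
 *	static int wlan_send_delba(struct cdp_ctrl_objmgr_psoc *psoc,
 *				   uint8_t vdev_id, uint8_t *peer_macaddr,
 *				   uint8_t tid, uint8_t reason_code)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ol_if_ops wlan_ol_if_ops = {
 *		.send_delba = wlan_send_delba,
 *	};
 */
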
#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
 * @set_wmm_param: set wmm parameters
 * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
 * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
 * @hl_tdls_flag_reset: reset tdls flag for vdev
 * @tx_non_std: Allow the control-path SW to send data frames
 * @get_vdev_id: get vdev id
 * @set_wisa_mode: set wisa mode for a vdev
 * @txrx_data_stall_cb_register: register data stall callback
 * @txrx_data_stall_cb_deregister: deregister data stall callback
 * @txrx_post_data_stall_event: post data stall event
 * @runtime_suspend: ensure TXRX is ready to runtime suspend
 * @runtime_resume: ensure TXRX is ready to runtime resume
 * @get_opmode: get operation mode of vdev
 * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
 *                            marking first packet after wow wakeup
 * @update_mac_id: update mac_id for vdev
 * @flush_rx_frames: flush rx frames on the queue
 * @get_intra_bss_fwd_pkts_count: to get the total tx and rx packets that
 *                                have been forwarded from txrx layer
 *                                without going to upper layers
 * @pkt_log_init: handler to initialize packet log
 * @pkt_log_con_service: handler to connect packet log service
 * @get_num_rx_contexts: handler to get number of RX contexts
 * @register_pktdump_cb: register callback for different pktlog
 * @unregister_pktdump_cb: unregister callback for different pktlog
 * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
 * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
 *
 * Function pointers for miscellaneous soc/pdev/vdev related operations.
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, int enable,
		int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		int level, int tput_thresh,
		int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
		struct cdp_soc_t *soc_hdl,
		enum data_stall_log_event_indicator indicator,
		enum data_stall_log_event_type data_stall_type,
		uint32_t pdev_id, uint32_t vdev_id_bitmap,
		enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, uint8_t value);
	void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		uint8_t mac_id);
	void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		uint64_t *fwd_tx_packets,
		uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
		void *scn);
	void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id, void *scn);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
	void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		ol_txrx_pktdump_cb tx_cb,
		ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
		uint8_t vdev_id,
		unsigned long rx_packets,
		uint32_t time_in_ms,
		uint32_t high_th,
		uint32_t low_th);
	void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
		unsigned long tx_bytes,
		uint32_t time_in_ms,
		uint32_t high_th,
		uint32_t low_th);
	void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id);
	QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
		uint8_t pdev_id,
		struct cdp_txrx_ext_stats *req);
	void (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	QDF_STATUS (*wait_for_ext_rx_stats)(struct cdp_soc_t *soc_hdl);
};
  1077. /**
  1078. * struct cdp_ocb_ops - mcl ocb ops
  1079. * @set_ocb_chan_info: set OCB channel info
  1080. * @get_ocb_chan_info: get OCB channel info
  1081. *
  1082. * Function pointers for operations related to OCB.
  1083. */
  1084. struct cdp_ocb_ops {
  1085. void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1086. struct ol_txrx_ocb_set_chan ocb_set_chan);
  1087. struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
  1088. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1089. };
  1090. /**
  1091. * struct cdp_peer_ops - mcl peer related ops
  1092. * @register_peer:
  1093. * @clear_peer:
  1094. * @cfg_attach:
  1095. * @find_peer_by_addr:
  1096. * @find_peer_by_addr_and_vdev:
  1097. * @local_peer_id:
  1098. * @peer_find_by_local_id:
  1099. * @peer_state_update:
  1100. * @get_vdevid:
  1101. * @get_vdev_by_sta_id:
  1102. * @register_ocb_peer:
  1103. * @peer_get_peer_mac_addr:
  1104. * @get_peer_state:
  1105. * @get_vdev_for_peer:
  1106. * @update_ibss_add_peer_num_of_vdev:
  1107. * @remove_peers_for_vdev:
  1108. * @remove_peers_for_vdev_no_lock:
  1109. * @copy_mac_addr_raw:
  1110. * @add_last_real_peer:
  1111. * @is_vdev_restore_last_peer:
  1112. * @update_last_real_peer:
  1113. */
  1114. struct cdp_peer_ops {
  1115. QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
  1116. struct ol_txrx_desc_type *sta_desc);
  1117. QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev,
  1118. struct qdf_mac_addr peer_addr);
  1119. QDF_STATUS (*change_peer_state)(uint8_t sta_id,
  1120. enum ol_txrx_peer_state sta_state,
  1121. bool roam_synch_in_progress);
  1122. void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
  1123. uint8_t *peer_addr,
  1124. enum peer_debug_id_type debug_id);
  1125. void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
  1126. void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
  1127. uint8_t *peer_addr);
  1128. void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
  1129. struct cdp_vdev *vdev,
  1130. uint8_t *peer_addr);
  1131. QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
  1132. uint8_t *peer_addr,
  1133. enum ol_txrx_peer_state state);
  1134. QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
  1135. struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
  1136. struct qdf_mac_addr peer_addr);
  1137. QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
  1138. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  1139. int (*get_peer_state)(void *peer);
  1140. struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
  1141. int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
  1142. int16_t peer_num_delta);
  1143. void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
  1144. ol_txrx_vdev_peer_remove_cb callback,
  1145. void *callback_context, bool remove_last_peer);
  1146. void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
  1147. ol_txrx_vdev_peer_remove_cb callback,
  1148. void *callback_context);
  1149. void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
  1150. void (*add_last_real_peer)(struct cdp_pdev *pdev,
  1151. struct cdp_vdev *vdev);
  1152. bool (*is_vdev_restore_last_peer)(void *peer);
  1153. void (*update_last_real_peer)(struct cdp_pdev *pdev, void *vdev,
  1154. bool restore_last_peer);
  1155. void (*peer_detach_force_delete)(void *peer);
  1156. void (*set_tdls_offchan_enabled)(void *peer, bool val);
  1157. void (*set_peer_as_tdls_peer)(void *peer, bool val);
  1158. };
  1159. /**
  1160. * struct cdp_mob_stats_ops - mcl mob stats ops
  1161. * @clear_stats: handler to clear ol txrx stats
  1162. * @stats: handler to update ol txrx stats
  1163. */
  1164. struct cdp_mob_stats_ops {
  1165. QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl,
  1166. uint8_t pdev_id, uint8_t bitmap);
  1167. int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
  1168. };
  1169. /**
  1170. * struct cdp_pmf_ops - mcl protected management frame ops
  1171. * @get_pn_info: handler to get pn info from peer
  1172. *
  1173. * Function pointers for pmf related operations.
  1174. */
  1175. struct cdp_pmf_ops {
  1176. void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
  1177. uint8_t vdev_id, uint8_t **last_pn_valid,
  1178. uint64_t **last_pn, uint32_t **rmf_pn_replays);
  1179. };
  1180. #endif
  1181. #ifdef DP_FLOW_CTL
  1182. /**
  1183. * struct cdp_cfg_ops - mcl configuration ops
  1184. * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
  1185. * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
  1186. * @cfg_attach: hardcode the configuration parameters
  1187. * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
  1188. * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
  1189. * 1 enabled, 0 disabled.
  1190. * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
  1191. * indicate that mgmt over wmi is enabled
  1192. * or not,
  1193. * 1 for enabled, 0 for disable
  1194. * @is_high_latency: get device is high or low latency device,
  1195. * 1 high latency bus, 0 low latency bus
  1196. * @set_flow_control_parameters: set flow control parameters
  1197. * @set_flow_steering: set flow_steering_enabled flag
  1198. * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
  1199. * @set_new_htt_msg_format: set new_htt_msg_format flag
  1200. * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
  1201. * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
  1202. * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
  1203. * 1 enabled, 0 disabled.
  1204. * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
  1205. * 1 enabled, 0 disabled.
  1206. */
  1207. struct cdp_cfg_ops {
  1208. void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
  1209. uint8_t disable_rx_fwd);
  1210. void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
  1211. uint8_t val);
  1212. struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
  1213. void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
  1214. uint8_t vdev_id, bool val);
  1215. uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
  1216. void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
  1217. int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
  1218. void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
  1219. void *param);
  1220. void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
  1221. void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
  1222. void (*set_new_htt_msg_format)(uint8_t val);
  1223. void (*set_peer_unmap_conf_support)(bool val);
  1224. bool (*get_peer_unmap_conf_support)(void);
  1225. void (*set_tx_compl_tsf64)(bool val);
  1226. bool (*get_tx_compl_tsf64)(void);
  1227. };

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
 * @register_pause_cb: handler to register tx pause callback
 * @set_desc_global_pool_size: handler to set global pool size
 * @dump_flow_pool_info: handler to dump global and flow pool info
 * @tx_desc_thresh_reached: handler to check whether the tx descriptor
 *                          threshold has been reached
 *
 * Function pointers for operations related to flow control
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
			uint8_t pdev_id,
			uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
			uint8_t pdev_id,
			uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
			tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
	bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id);
};
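
/*
 * Example (illustrative sketch): registering a tx pause callback with the
 * flow control ops during driver start-up.  The helper name and the
 * soc->ops access path are assumptions used only for illustration; the
 * callback itself is of the tx_pause_callback type referenced above.
 *
 *	static QDF_STATUS example_fc_register(struct cdp_soc_t *soc,
 *					      tx_pause_callback pause_cb)
 *	{
 *		if (!soc->ops->flowctl_ops ||
 *		    !soc->ops->flowctl_ops->register_pause_cb)
 *			return QDF_STATUS_E_NOSUPPORT;
 *
 *		return soc->ops->flowctl_ops->register_pause_cb(soc, pause_cb);
 *	}
 */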

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare them against the watermark
 * @ll_set_tx_pause_q_depth: Set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id,
			tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id, uint32_t chan_freq);
	int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id,
			enum netif_action_type action);
#else
	int (*register_tx_flow_control)(
			struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id,
			ol_txrx_tx_flow_control_fp flowControl,
			void *osif_fc_ctx,
			ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			bool tx_resume);
	bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			struct qdf_mac_addr peer_addr,
			unsigned int low_watermark,
			unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
			int pause_q_depth);
	void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
	void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			uint32_t reason, uint32_t pause_type);
	void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			uint32_t reason, uint32_t pause_type);
};
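
/*
 * Example (illustrative sketch): pausing and later unpausing a vdev through
 * the legacy flow control table, e.g. around a channel switch.  The helper
 * name, the soc->ops access path and the reason/pause_type values are
 * assumptions used only for illustration.
 *
 *	static void example_vdev_pause_toggle(struct cdp_soc_t *soc,
 *					      uint8_t vdev_id, bool pause,
 *					      uint32_t reason,
 *					      uint32_t pause_type)
 *	{
 *		struct cdp_lflowctl_ops *ops = soc->ops->l_flowctl_ops;
 *
 *		if (!ops)
 *			return;
 *
 *		if (pause && ops->vdev_pause)
 *			ops->vdev_pause(soc, vdev_id, reason, pause_type);
 *		else if (!pause && ops->vdev_unpause)
 *			ops->vdev_unpause(soc, vdev_id, reason, pause_type);
 *	}
 */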

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id, int period,
			uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id, int level);
};
#endif

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: handler to get the IPA tx/rx resource info
 * @ipa_set_doorbell_paddr: handler to set the IPA doorbell register
 *                          physical address
 * @ipa_set_active: handler to mark the IPA uC tx/rx path active or inactive
 * @ipa_op_response: handler to process an IPA uC op response message
 * @ipa_register_op_cb: handler to register an IPA uC op callback
 * @ipa_get_stat: handler to request IPA uC stats
 * @ipa_tx_data_frame: handler to send a data frame on behalf of IPA
 * @ipa_set_uc_tx_partition_base: set the IPA uC tx partition base
 * @ipa_uc_get_share_stats: get the IPA uC wifi sharing stats
 * @ipa_uc_set_quota: set the IPA uC data quota
 * @ipa_enable_autonomy: enable the IPA autonomous rx path
 * @ipa_disable_autonomy: disable the IPA autonomous rx path
 * @ipa_setup: set up the IPA WDI tx/rx pipes
 * @ipa_cleanup: tear down the IPA WDI tx/rx pipes
 * @ipa_setup_iface: set up IPA header and routing for an interface
 * @ipa_cleanup_iface: remove an IPA interface configuration
 * @ipa_enable_pipes: enable the IPA tx/rx pipes
 * @ipa_disable_pipes: disable the IPA tx/rx pipes
 * @ipa_set_perf_level: vote for an IPA performance (bandwidth) level
 * @ipa_rx_intrabss_fwd: attempt intra-BSS forwarding of an rx packet
 *                       received from IPA
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
	QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id, bool uc_active, bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id,
			void (*ipa_uc_op_cb_type)
				(uint8_t *op_msg, void *osif_ctxt),
			void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
			uint8_t vdev_id, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
			uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id,
			uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id, uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle, bool is_smmu_enabled,
			qdf_ipa_sys_connect_params_t *sys_in,
			bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
			uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
			qdf_ipa_client_type_t prod_client,
			qdf_ipa_client_type_t cons_client,
			uint8_t session_id, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id);
	QDF_STATUS (*ipa_set_perf_level)(int client,
			uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			qdf_nbuf_t nbuf, bool *fwd_success);
};
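
/*
 * Example (illustrative sketch): a host IPA component would typically query
 * resources and program the doorbell address before enabling the pipes,
 * with teardown running in the reverse order.  The helper below shows only
 * this enable half and skips ipa_setup(), whose argument list differs with
 * CONFIG_IPA_WDI_UNIFIED_API.  The helper name and the soc->ops access path
 * are assumptions used only for illustration.
 *
 *	static QDF_STATUS example_ipa_enable(struct cdp_soc_t *soc,
 *					     uint8_t pdev_id)
 *	{
 *		struct cdp_ipa_ops *ops = soc->ops->ipa_ops;
 *		QDF_STATUS status;
 *
 *		status = ops->ipa_get_resource(soc, pdev_id);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		status = ops->ipa_set_doorbell_paddr(soc, pdev_id);
 *		if (QDF_IS_STATUS_ERROR(status))
 *			return status;
 *
 *		return ops->ipa_enable_pipes(soc, pdev_id);
 *	}
 */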
#endif

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointers for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint32_t *queue_delay_microsec,
			uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint16_t *out_packet_count,
			uint16_t *out_packet_loss_count, int category);
	void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id, uint32_t interval);
};

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
	QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
};
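
/*
 * Example (illustrative sketch): how a platform suspend path might drive
 * the bus ops; a matching bus_resume() call is expected on the resume path.
 * The helper name and the soc->ops access path are assumptions used only
 * for illustration.
 *
 *	static QDF_STATUS example_bus_suspend(struct cdp_soc_t *soc,
 *					      uint8_t pdev_id)
 *	{
 *		if (!soc->ops->bus_ops || !soc->ops->bus_ops->bus_suspend)
 *			return QDF_STATUS_E_NOSUPPORT;
 *
 *		return soc->ops->bus_ops->bus_suspend(soc, pdev_id);
 *	}
 */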
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb: register a callback used to flush pending
 *                              rx offload aggregation
 * @deregister_rx_offld_flush_cb: deregister the rx offload flush callback
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
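
/*
 * Example (illustrative sketch): registering and later deregistering an rx
 * offload flush callback.  The callback body and the names below are
 * assumptions used only for illustration.
 *
 *	static void example_rx_offld_flush(void *data)
 *	{
 *		// flush any aggregated rx packets held for this context
 *	}
 *
 *	static void example_rx_offld_init(struct cdp_soc_t *soc, bool enable)
 *	{
 *		struct cdp_rx_offld_ops *ops = soc->ops->rx_offld_ops;
 *
 *		if (!ops)
 *			return;
 *
 *		if (enable)
 *			ops->register_rx_offld_flush_cb(example_rx_offld_flush);
 *		else
 *			ops->deregister_rx_offld_flush_cb();
 *	}
 */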
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: handler to configure host rx monitor status ring
 */
struct cdp_cfr_ops {
	void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id,
			bool enable,
			struct cdp_monitor_filter *filter_val);
};
#endif

/*
 * struct cdp_ops - top-level table aggregating the cdp ops groups above.
 * A data path implementation populates this structure and exposes it
 * through its soc handle so callers can dispatch into the individual
 * ops tables.
 */
struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
	struct cdp_misc_ops *misc_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
	struct cdp_bus_ops *bus_ops;
	struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#ifdef WLAN_FEATURE_PKT_CAPTURE
	struct cdp_pktcapture_ops *pktcapture_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	struct cdp_cfr_ops *cfr_ops;
#endif
};
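
/*
 * Example (illustrative sketch): how a data path implementation might
 * populate its cdp_ops table at attach time.  The example_dp_* names are
 * assumptions used only for illustration; a real implementation supplies
 * its own table for each ops group it supports and hands the aggregate to
 * the converged driver through its soc handle.
 *
 *	static struct cdp_cmn_ops example_dp_cmn_ops = {
 *		// common data path handlers filled in here
 *	};
 *
 *	static struct cdp_ops example_dp_ops = {
 *		.cmn_drv_ops = &example_dp_cmn_ops,
 *		// remaining groups filled in the same way
 *	};
 */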
#endif