cdp_txrx_ops.h

  1. /*
  2. * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
  3. *
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /**
  20. * @file cdp_txrx_ops.h
  21. * @brief Define the host data path converged API functions
  22. * called by the host control SW and the OS interface module
  23. */
  24. #ifndef _CDP_TXRX_CMN_OPS_H_
  25. #define _CDP_TXRX_CMN_OPS_H_
  26. #include <cdp_txrx_cmn_struct.h>
  27. #include <cdp_txrx_stats_struct.h>
  28. #include "cdp_txrx_handle.h"
  29. #include <cdp_txrx_mon_struct.h>
  30. #include "wlan_objmgr_psoc_obj.h"
  31. #include <wmi_unified_api.h>
  32. #include <wdi_event_api.h>
  33. #ifdef IPA_OFFLOAD
  34. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
  35. defined(CONFIG_IPA_WDI_UNIFIED_API)
  36. #include <qdf_ipa_wdi3.h>
  37. #else
  38. #include <qdf_ipa.h>
  39. #endif
  40. #endif
  41. /**
  42. * bitmap values to indicate special handling of peer_delete
  43. */
  44. #define CDP_PEER_DELETE_NO_SPECIAL 0
  45. #define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1
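/*
 * Illustrative sketch, not part of the original header: one way a caller
 * might encode the peer_delete bitmap for the txrx_peer_delete op declared
 * below. The helper name example_remove_peer, the soc->ops->cmn_drv_ops
 * access path and the assumption that callers pass the values above as bit
 * positions are all assumptions for this example.
 *
 *   static QDF_STATUS example_remove_peer(struct cdp_soc_t *soc,
 *                                         uint8_t vdev_id, uint8_t *peer_mac,
 *                                         bool skip_unmap_timer)
 *   {
 *       uint32_t bitmap = skip_unmap_timer ?
 *               (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER) :
 *               (1 << CDP_PEER_DELETE_NO_SPECIAL);
 *
 *       // txrx_peer_delete is a member of struct cdp_cmn_ops below
 *       return soc->ops->cmn_drv_ops->txrx_peer_delete(soc, vdev_id,
 *                                                      peer_mac, bitmap);
 *   }
 */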
  46. struct hif_opaque_softc;
  47. /* same as ieee80211_nac_param */
  48. enum cdp_nac_param_cmd {
  49. /* IEEE80211_NAC_PARAM_ADD */
  50. CDP_NAC_PARAM_ADD = 1,
  51. /* IEEE80211_NAC_PARAM_DEL */
  52. CDP_NAC_PARAM_DEL,
  53. /* IEEE80211_NAC_PARAM_LIST */
  54. CDP_NAC_PARAM_LIST,
  55. };
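/*
 * Illustrative sketch (assumption): using the NAC commands above with the
 * txrx_update_filter_neighbour_peers ctrl op declared further down in this
 * file. The soc->ops->ctrl_ops access path and example_add_neighbour name
 * are assumptions for the example.
 *
 *   static int example_add_neighbour(struct cdp_soc_t *soc, uint8_t vdev_id,
 *                                    uint8_t *nac_mac)
 *   {
 *       // CDP_NAC_PARAM_ADD mirrors IEEE80211_NAC_PARAM_ADD
 *       return soc->ops->ctrl_ops->txrx_update_filter_neighbour_peers(
 *                       soc, vdev_id, CDP_NAC_PARAM_ADD, nac_mac);
 *   }
 */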
  56. #define CDP_DELBA_INTERVAL_MS 3000
  57. /**
  58. * enum cdp_delba_rcode - CDP reason code for sending DELBA
  59. * @CDP_DELBA_REASON_NONE: None
  60. * @CDP_DELBA_2K_JUMP: Sending DELBA from 2k_jump_handle
  61. */
  62. enum cdp_delba_rcode {
  63. CDP_DELBA_REASON_NONE = 0,
  64. CDP_DELBA_2K_JUMP,
  65. };
  66. /**
  67. * enum vdev_peer_protocol_enter_exit - whether ingress or egress
  68. * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress
  69. * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress
  70. *
  71. * whether ingress or egress
  72. */
  73. enum vdev_peer_protocol_enter_exit {
  74. CDP_VDEV_PEER_PROTOCOL_IS_INGRESS,
  75. CDP_VDEV_PEER_PROTOCOL_IS_EGRESS
  76. };
  77. /**
  78. * enum vdev_peer_protocol_tx_rx - whether tx or rx
  79. * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx
  80. * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx
  81. *
  82. * whether tx or rx
  83. */
  84. enum vdev_peer_protocol_tx_rx {
  85. CDP_VDEV_PEER_PROTOCOL_IS_TX,
  86. CDP_VDEV_PEER_PROTOCOL_IS_RX
  87. };
  88. /**
  89. * enum vdev_ll_conn_actions - Actions to inform vdev about
  90. * low latency connection.
  91. * @CDP_VDEV_LL_CONN_ADD: Add Low latency connection
  92. * @CDP_VDEV_LL_CONN_DEL: Delete Low latency connection
  93. */
  94. enum vdev_ll_conn_actions {
  95. CDP_VDEV_LL_CONN_ADD,
  96. CDP_VDEV_LL_CONN_DEL
  97. };
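/*
 * Illustrative sketch (assumption): informing DP about a latency-critical
 * connection through the vdev_inform_ll_conn misc op declared later in this
 * file. The soc->ops->misc_ops access path is assumed for the example.
 *
 *   static QDF_STATUS example_mark_ll_conn(struct cdp_soc_t *soc,
 *                                          uint8_t vdev_id, bool add)
 *   {
 *       enum vdev_ll_conn_actions action =
 *               add ? CDP_VDEV_LL_CONN_ADD : CDP_VDEV_LL_CONN_DEL;
 *
 *       return soc->ops->misc_ops->vdev_inform_ll_conn(soc, vdev_id, action);
 *   }
 */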
  98. /******************************************************************************
  99. *
  100. * Control Interface (A Interface)
  101. *
  102. *****************************************************************************/
  103. struct cdp_cmn_ops {
  104. QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
  105. int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id);
  106. QDF_STATUS (*txrx_vdev_attach)
  107. (struct cdp_soc_t *soc, uint8_t pdev_id, uint8_t *mac,
  108. uint8_t vdev_id, enum wlan_op_mode op_mode,
  109. enum wlan_op_subtype subtype);
  110. QDF_STATUS
  111. (*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
  112. ol_txrx_vdev_delete_cb callback,
  113. void *cb_context);
  114. QDF_STATUS (*txrx_pdev_attach)
  115. (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
  116. qdf_device_t osdev, uint8_t pdev_id);
  117. int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
  118. void
  119. (*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
  120. int force);
  121. QDF_STATUS
  122. (*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
  123. int force);
  124. /**
  125. * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
  126. * @soc: soc dp handle
  127. * @pdev_id: id of Dp pdev handle
  128. * @force: Force deinit or not
  129. *
  130. * Return: QDF_STATUS
  131. */
  132. QDF_STATUS
  133. (*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
  134. int force);
  135. QDF_STATUS
  136. (*txrx_peer_create)
  137. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  138. uint8_t *peer_mac_addr);
  139. QDF_STATUS
  140. (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  141. uint8_t *peer_mac);
  142. QDF_STATUS
  143. (*txrx_cp_peer_del_response)
  144. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  145. uint8_t *peer_mac_addr);
  146. QDF_STATUS
  147. (*txrx_peer_teardown)
  148. (struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
  149. int (*txrx_peer_add_ast)
  150. (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
  151. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  152. uint32_t flags);
  153. int (*txrx_peer_update_ast)
  154. (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
  155. uint8_t *mac_addr, uint32_t flags);
  156. bool (*txrx_peer_get_ast_info_by_soc)
  157. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  158. struct cdp_ast_entry_info *ast_entry_info);
  159. bool (*txrx_peer_get_ast_info_by_pdev)
  160. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  161. uint8_t pdev_id,
  162. struct cdp_ast_entry_info *ast_entry_info);
  163. QDF_STATUS (*txrx_peer_ast_delete_by_soc)
  164. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  165. txrx_ast_free_cb callback,
  166. void *cookie);
  167. QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
  168. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  169. uint8_t pdev_id,
  170. txrx_ast_free_cb callback,
  171. void *cookie);
  172. QDF_STATUS
  173. (*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
  174. uint8_t *peer_mac, uint32_t bitmap);
  175. QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
  176. uint8_t vdev_id,
  177. uint8_t smart_monitor);
  178. void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
  179. uint8_t *peer_mac,
  180. QDF_STATUS(*delete_cb)(
  181. uint8_t vdev_id,
  182. uint32_t peerid_cnt,
  183. uint16_t *peerid_list),
  184. uint32_t bitmap);
  185. void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
  186. uint8_t pdev_id,
  187. ol_txrx_peer_unmap_sync_cb
  188. peer_unmap_sync);
  189. QDF_STATUS
  190. (*txrx_get_peer_mac_from_peer_id)
  191. (struct cdp_soc_t *cdp_soc,
  192. uint32_t peer_id, uint8_t *peer_mac);
  193. void
  194. (*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
  195. void
  196. (*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
  197. QDF_STATUS
  198. (*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
  199. struct cdp_dev_stats *stats, uint8_t type);
  200. QDF_STATUS
  201. (*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
  202. u_int8_t *mem_status,
  203. u_int8_t *user_position);
  204. uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
  205. uint8_t pdev_id);
  206. QDF_STATUS
  207. (*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
  208. int force);
  209. QDF_STATUS
  210. (*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
  211. uint32_t chan_mhz);
  212. QDF_STATUS
  213. (*txrx_set_privacy_filters)
  214. (struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
  215. uint32_t num);
  216. uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);
  217. /********************************************************************
  218. * Data Interface (B Interface)
  219. ********************************************************************/
  220. QDF_STATUS
  221. (*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
  222. ol_osif_vdev_handle osif_vdev,
  223. struct ol_txrx_ops *txrx_ops);
  224. int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
  225. qdf_nbuf_t tx_mgmt_frm, uint8_t type);
  226. int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
  227. qdf_nbuf_t tx_mgmt_frm, uint8_t type,
  228. uint8_t use_6mbps, uint16_t chanfreq);
  229. /**
  230. * ol_txrx_mgmt_tx_cb - tx management delivery notification
  231. * callback function
  232. */
  233. QDF_STATUS
  234. (*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
  235. uint8_t type,
  236. ol_txrx_mgmt_tx_cb download_cb,
  237. ol_txrx_mgmt_tx_cb ota_ack_cb,
  238. void *ctxt);
  239. /**
  240. * ol_txrx_data_tx_cb - Function registered with the data path
  241. * that is called when tx frames marked as "no free" are
  242. * done being transmitted
  243. */
  244. void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
  245. ol_txrx_data_tx_cb callback, void *ctxt);
  246. qdf_nbuf_t (*tx_send_exc)
  247. (ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list,
  248. struct cdp_tx_exception_metadata *tx_exc_metadata);
  249. /*******************************************************************
  250. * Statistics and Debugging Interface (C Interface)
  251. ********************************************************************/
  252. int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
  253. int max_subfrms_ampdu,
  254. int max_subfrms_amsdu);
  255. A_STATUS
  256. (*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
  257. struct ol_txrx_stats_req *req,
  258. bool per_vdev, bool response_expected);
  259. int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
  260. int debug_specs);
  261. QDF_STATUS
  262. (*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
  263. uint8_t cfg_stats_type, uint32_t cfg_val);
  264. void (*txrx_print_level_set)(unsigned level);
  265. /**
  266. * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
  267. * @soc: datapath soc handle
  268. * @vdev_id: vdev id
  269. *
  270. * Return: vdev mac address
  271. */
  272. uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
  273. uint8_t vdev_id);
  274. /**
  275. * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
  276. * @soc: datapath soc handle
  277. * @vdev_id: vdev id
  278. *
  279. * Return: Handle to control pdev
  280. */
  281. struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
  282. uint8_t vdev_id);
  283. /**
  284. * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
  285. * @soc: datapath soc handle
  286. * @pdev_id: pdev id
  287. *
  288. * Return: vdev_id
  289. */
  290. uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
  291. uint8_t pdev_id);
  292. void (*txrx_soc_detach)(struct cdp_soc_t *soc);
  293. /**
  294. * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
  295. * @soc: Opaque Dp handle
  296. *
  297. * Return: None
  298. */
  299. void (*txrx_soc_deinit)(struct cdp_soc_t *soc);
  300. /**
  301. * txrx_soc_init() - Initialize dp soc and dp ring memory
  302. * @soc: Opaque DP handle
  303. * @ctrl_psoc: Opaque CP handle
  304. * @hif_handle: Opaque HIF handle
  305. * @htc_handle: Opaque HTC handle
  306. * @qdf_osdev: QDF device; @ol_ops: offload ops; @device_id: device id
  307. * Return: DP SOC handle on success
  308. */
  309. void *(*txrx_soc_init)(struct cdp_soc_t *soc,
  310. struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  311. struct hif_opaque_softc *hif_handle,
  312. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
  313. struct ol_if_ops *ol_ops, uint16_t device_id);
  314. QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
  315. HTC_HANDLE htc_handle,
  316. qdf_device_t qdf_osdev,
  317. uint8_t pdev_id);
  318. /**
  319. * txrx_tso_soc_attach() - TSO attach handler triggered during
  320. * dynamic tso activation
  321. * @soc: Opaque Dp handle
  322. *
  323. * Return: QDF status
  324. */
  325. QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);
  326. /**
  327. * txrx_tso_soc_detach() - TSO detach handler triggered during
  328. * dynamic tso de-activation
  329. * @soc: Opaque Dp handle
  330. *
  331. * Return: QDF status
  332. */
  333. QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
  334. int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
  335. uint8_t *peer_mac,
  336. uint16_t vdev_id, uint8_t tid,
  337. int status);
  338. int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
  339. uint8_t *peer_mac,
  340. uint16_t vdev_id,
  341. uint8_t dialogtoken,
  342. uint16_t tid, uint16_t batimeout,
  343. uint16_t buffersize,
  344. uint16_t startseqnum);
  345. QDF_STATUS
  346. (*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
  347. uint8_t *peer_mac,
  348. uint16_t vdev_id, uint8_t tid,
  349. uint8_t *dialogtoken, uint16_t *statuscode,
  350. uint16_t *buffersize, uint16_t *batimeout);
  351. int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  352. uint16_t vdev_id, int tid, uint16_t reasoncode);
  353. /**
  354. * delba_tx_completion() - Indicate delba tx status
  355. * @cdp_soc: soc handle
  356. * @peer_mac: Peer mac address
  357. * @vdev_id: vdev id
  358. * @tid: Tid number
  359. * @status: Tx completion status
  360. *
  361. * Return: 0 on Success, 1 on failure
  362. */
  363. int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  364. uint16_t vdev_id,
  365. uint8_t tid, int status);
  366. QDF_STATUS
  367. (*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  368. uint16_t vdev_id, uint8_t tid,
  369. uint16_t statuscode);
  370. QDF_STATUS
  371. (*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
  372. uint8_t vdev_id, uint8_t map_id);
  373. int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
  374. void (*flush_cache_rx_queue)(void);
  375. QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
  376. uint8_t pdev_id,
  377. uint8_t map_id,
  378. uint8_t tos, uint8_t tid);
  379. QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
  380. uint8_t vdev_id,
  381. struct cdp_txrx_stats_req *req);
  382. QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
  383. enum qdf_stats_verbosity_level level);
  384. QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
  385. void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
  386. QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
  387. uint8_t vdev_id, uint8_t *peermac,
  388. enum cdp_sec_type sec_type,
  389. uint32_t *rx_pn);
  390. QDF_STATUS(*set_key_sec_type)(struct cdp_soc_t *soc_handle,
  391. uint8_t vdev_id, uint8_t *peermac,
  392. enum cdp_sec_type sec_type,
  393. bool is_unicast);
  394. QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
  395. struct cdp_config_params *params);
  396. void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
  397. void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
  398. void *dp_hdl);
  399. void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
  400. uint8_t vdev_id);
  401. QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
  402. uint8_t vdev_id,
  403. uint16_t size);
  404. void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
  405. void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
  406. void *dp_txrx_handle);
  407. QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
  408. uint32_t lmac_id);
  409. QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc,
  410. uint8_t pdev_id, uint32_t lmac_id);
  411. QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
  412. uint8_t pdev_id, bool is_pdev_down);
  413. QDF_STATUS (*txrx_peer_reset_ast)
  414. (ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
  415. uint8_t *peer_macaddr, uint8_t vdev_id);
  416. QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
  417. uint8_t vdev_id);
  418. void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
  419. void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
  420. uint8_t ac, uint32_t value);
  421. void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
  422. uint8_t ac, uint32_t *value);
  423. QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
  424. uint32_t num_peers,
  425. uint32_t max_ast_index,
  426. bool peer_map_unmap_v2);
  427. QDF_STATUS (*set_soc_param)(ol_txrx_soc_handle soc,
  428. enum cdp_soc_param_t param,
  429. uint32_t value);
  430. ol_txrx_tx_fp tx_send;
  431. /**
  432. * txrx_get_os_rx_handles_from_vdev() - Return the rx delivery function and
  433. * osif vdev used to deliver packets to the stack.
  434. * @soc: datapath soc handle
  435. * @vdev_id: vdev id
  436. * @stack_fn: out pointer, filled with the function that delivers RX packets to the stack
  437. * @osif_vdev: out pointer, filled with the osif vdev RX packets are delivered to
  438. */
  439. void (*txrx_get_os_rx_handles_from_vdev)
  440. (ol_txrx_soc_handle soc,
  441. uint8_t vdev_id,
  442. ol_txrx_rx_fp *stack_fn,
  443. ol_osif_vdev_handle *osif_vdev);
  444. void (*set_rate_stats_ctx)(struct cdp_soc_t *soc,
  445. void *ctx);
  446. int (*txrx_classify_update)
  447. (struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
  448. enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
  449. bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
  450. enum cdp_capabilities dp_caps);
  451. void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
  452. QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
  453. uint8_t pdev_id,
  454. void *buf);
  455. void* (*txrx_peer_get_rdkstats_ctx)(struct cdp_soc_t *soc,
  456. uint8_t vdev_id,
  457. uint8_t *mac_addr);
  458. QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
  459. uint8_t pdev_id);
  460. QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
  461. uint8_t pdev_id,
  462. uint8_t pcp, uint8_t tid);
  463. QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
  464. uint8_t vdev_id,
  465. uint8_t pcp, uint8_t tid);
  466. #ifdef QCA_MULTIPASS_SUPPORT
  467. QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
  468. uint16_t vlan_id, uint16_t group_key);
  469. #endif
  470. uint16_t (*get_peer_mac_list)
  471. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  472. u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt,
  473. bool limit);
  474. #ifdef QCA_SUPPORT_WDS_EXTENDED
  475. uint16_t (*get_wds_ext_peer_id)(ol_txrx_soc_handle soc,
  476. uint8_t vdev_id,
  477. uint8_t *mac);
  478. QDF_STATUS (*set_wds_ext_peer_rx)(ol_txrx_soc_handle soc,
  479. uint8_t vdev_id,
  480. uint8_t *mac,
  481. ol_txrx_rx_fp rx,
  482. ol_osif_peer_handle osif_peer);
  483. #endif /* QCA_SUPPORT_WDS_EXTENDED */
  484. void (*txrx_drain)(ol_txrx_soc_handle soc);
  485. };
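/*
 * Illustrative sketch, not part of the original header: the bring-up order a
 * control-path caller might follow with the common ops above - attach a vdev,
 * then register the OS-interface handle and callbacks for it. The
 * soc->ops->cmn_drv_ops access path, the example_* names and the use of
 * wlan_op_mode_sta / wlan_op_subtype_none from cdp_txrx_cmn_struct.h are
 * assumptions for this example.
 *
 *   static QDF_STATUS example_vdev_bringup(struct cdp_soc_t *soc,
 *                                          uint8_t pdev_id, uint8_t vdev_id,
 *                                          uint8_t *mac,
 *                                          ol_osif_vdev_handle osif_vdev,
 *                                          struct ol_txrx_ops *txrx_ops)
 *   {
 *       QDF_STATUS status;
 *
 *       status = soc->ops->cmn_drv_ops->txrx_vdev_attach(soc, pdev_id, mac,
 *                                                        vdev_id,
 *                                                        wlan_op_mode_sta,
 *                                                        wlan_op_subtype_none);
 *       if (QDF_IS_STATUS_ERROR(status))
 *               return status;
 *
 *       // Hand the OSIF handle and tx/rx callbacks to the data path
 *       return soc->ops->cmn_drv_ops->txrx_vdev_register(soc, vdev_id,
 *                                                        osif_vdev, txrx_ops);
 *   }
 */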
  486. struct cdp_ctrl_ops {
  487. int
  488. (*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc);
  489. int
  490. (*txrx_update_filter_neighbour_peers)(
  491. struct cdp_soc_t *soc, uint8_t vdev_id,
  492. uint32_t cmd, uint8_t *macaddr);
  493. /* Is this similar to ol_txrx_peer_state_update() in MCL */
  494. /**
  495. * @brief Update the authorize peer object at association time
  496. * @details
  497. * For the host-based implementation of rate-control, it
  498. * updates the peer/node-related parameters within rate-control
  499. * context of the peer at association.
  500. *
  501. * @param soc_hdl - pointer to the soc object
  502. * @param vdev_id - id of the virtual device (vdev)
  503. * @param peer_mac - mac address of the peer's node object
  504. * @param authorize - either authorize or unauthorize the peer
  505. *
  506. * @return QDF_STATUS
  507. */
  508. QDF_STATUS
  509. (*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl,
  510. uint8_t vdev_id,
  511. uint8_t *peer_mac,
  512. u_int32_t authorize);
  513. void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id);
  514. int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl);
  515. QDF_STATUS
  516. (*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id,
  517. enum cdp_vdev_param_type param,
  518. cdp_config_param_type val);
  519. /**
  520. * @brief Set the reo dest ring num of the radio
  521. * @details
  522. * Set the reo destination ring no on which we will receive
  523. * pkts for this radio.
  524. *
  525. * @param txrx_soc - soc handle
  526. * @param pdev_id - id of physical device
  527. * @param reo_dest_ring_num - value ranges between 1 - 4
  528. * @return QDF_STATUS
  529. */
  530. QDF_STATUS (*txrx_set_pdev_reo_dest)(
  531. struct cdp_soc_t *txrx_soc,
  532. uint8_t pdev_id,
  533. enum cdp_host_reo_dest_ring reo_dest_ring_num);
  534. /**
  535. * @brief Get the reo dest ring num of the radio
  536. * @details
  537. * Get the reo destination ring no on which we will receive
  538. * pkts for this radio.
  539. *
  540. * @param txrx_soc - soc handle
  541. * @param pdev_id - id of physical device
  542. * @return the reo destination ring number
  543. */
  544. enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
  545. struct cdp_soc_t *txrx_soc,
  546. uint8_t pdev_id);
  547. int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id,
  548. wdi_event_subscribe *event_cb_sub,
  549. uint32_t event);
  550. int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id,
  551. wdi_event_subscribe *event_cb_sub,
  552. uint32_t event);
  553. int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id,
  554. uint8_t *peer_mac, uint8_t sec_idx);
  555. QDF_STATUS
  556. (*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc,
  557. uint8_t vdev_id,
  558. uint8_t subtype, uint8_t tx_power);
  559. /**
  560. * txrx_set_pdev_param() - callback to set pdev parameter
  561. * @soc: opaque soc handle
  562. * @pdev_id: id of data path pdev handle
  563. * @type: pdev parameter type; @val: value of the parameter
  564. *
  565. * Return: status: 0 - Success, non-zero: Failure
  566. */
  567. QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc,
  568. uint8_t pdev_id,
  569. enum cdp_pdev_param_type type,
  570. cdp_config_param_type val);
  571. QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc,
  572. uint8_t pdev_id,
  573. enum cdp_pdev_param_type type,
  574. cdp_config_param_type *val);
  575. QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc,
  576. uint8_t vdev_id, uint8_t *peer_mac,
  577. enum cdp_peer_param_type param,
  578. cdp_config_param_type val);
  579. QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc,
  580. uint8_t vdev_id, uint8_t *peer_mac,
  581. enum cdp_peer_param_type param,
  582. cdp_config_param_type *val);
  583. void * (*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id);
  584. void (*txrx_peer_flush_frags)(struct cdp_soc_t *soc, uint8_t vdev_id,
  585. uint8_t *peer_mac);
  586. #ifdef VDEV_PEER_PROTOCOL_COUNT
  587. void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc,
  588. int8_t vdev_id,
  589. qdf_nbuf_t nbuf,
  590. bool is_egress,
  591. bool is_rx);
  592. #endif
  593. #ifdef ATH_SUPPORT_NAC_RSSI
  594. QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc,
  595. uint8_t vdev_id,
  596. enum cdp_nac_param_cmd cmd,
  597. char *bssid,
  598. char *client_macaddr,
  599. uint8_t chan_num);
  600. QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc,
  601. uint8_t vdev_id,
  602. char *macaddr,
  603. uint8_t *rssi);
  604. #endif
  605. #ifdef WLAN_SUPPORT_SCS
  606. QDF_STATUS
  607. (*txrx_enable_scs_params)(
  608. struct cdp_soc_t *soc,
  609. struct qdf_mac_addr *macaddr,
  610. uint8_t vdev_id,
  611. bool is_active);
  612. QDF_STATUS
  613. (*txrx_record_scs_params)(
  614. struct cdp_soc_t *soc,
  615. struct qdf_mac_addr *macaddr,
  616. uint8_t vdev_id,
  617. struct cdp_scs_params *scs_params,
  618. uint8_t entry_ctr,
  619. uint8_t scs_sessions);
  620. #endif
  621. #ifdef WLAN_SUPPORT_MSCS
  622. QDF_STATUS
  623. (*txrx_record_mscs_params) (
  624. struct cdp_soc_t *soc, uint8_t *macaddr,
  625. uint8_t vdev_id,
  626. struct cdp_mscs_params *mscs_params,
  627. bool active);
  628. #endif
  629. QDF_STATUS
  630. (*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac,
  631. bool is_unicast, uint32_t *key);
  632. QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc,
  633. uint8_t vdev_id,
  634. enum cdp_vdev_param_type param,
  635. cdp_config_param_type *val);
  636. int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc,
  637. uint8_t pdev_id,
  638. uint8_t *macaddr, uint8_t enb_dsb);
  639. QDF_STATUS
  640. (*calculate_delay_stats)(struct cdp_soc_t *cdp_soc,
  641. uint8_t vdev_id, qdf_nbuf_t nbuf);
  642. #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
  643. QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
  644. struct cdp_soc_t *soc, uint8_t pdev_id,
  645. uint32_t protocol_mask, uint16_t protocol_type,
  646. uint16_t tag);
  647. #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
  648. void (*txrx_dump_pdev_rx_protocol_tag_stats)(
  649. struct cdp_soc_t *soc, uint8_t pdev_id,
  650. uint16_t protocol_type);
  651. #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
  652. #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
  653. #ifdef WLAN_SUPPORT_RX_FLOW_TAG
  654. QDF_STATUS (*txrx_set_rx_flow_tag)(
  655. struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
  656. struct cdp_rx_flow_info *flow_info);
  657. QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
  658. struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
  659. struct cdp_rx_flow_info *flow_info);
  660. #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
  661. #ifdef QCA_MULTIPASS_SUPPORT
  662. void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc,
  663. uint8_t vdev_id, uint8_t *peer_mac,
  664. uint16_t vlan_id);
  665. #endif
  666. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
  667. QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
  668. ol_txrx_soc_handle soc, uint8_t pdev_id,
  669. bool is_rx_pkt_cap_enable, uint8_t is_tx_pkt_cap_enable,
  670. uint8_t *peer_mac);
  671. #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
  672. QDF_STATUS
  673. (*txrx_set_psoc_param)(struct cdp_soc_t *soc,
  674. enum cdp_psoc_param_type param,
  675. cdp_config_param_type val);
  676. QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc,
  677. enum cdp_psoc_param_type type,
  678. cdp_config_param_type *val);
  679. #ifdef VDEV_PEER_PROTOCOL_COUNT
  680. /*
  681. * Enable per-peer protocol counters
  682. */
  683. void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc,
  684. int8_t vdev_id, bool enable);
  685. void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
  686. int8_t vdev_id, int mask);
  687. int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc,
  688. int8_t vdev_id);
  689. int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
  690. int8_t vdev_id);
  691. #endif
  692. };
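/*
 * Illustrative sketch (assumption): setting a per-vdev parameter through the
 * txrx_set_vdev_param ctrl op above. cdp_config_param_type is a union defined
 * in cdp_txrx_cmn_struct.h; the member name cdp_vdev_param_wds and the enum
 * value CDP_ENABLE_WDS are assumptions used only for illustration.
 *
 *   static QDF_STATUS example_enable_wds(struct cdp_soc_t *soc,
 *                                        uint8_t vdev_id, bool enable)
 *   {
 *       cdp_config_param_type val = {0};
 *
 *       val.cdp_vdev_param_wds = enable;
 *       return soc->ops->ctrl_ops->txrx_set_vdev_param(soc, vdev_id,
 *                                                      CDP_ENABLE_WDS, val);
 *   }
 */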
  693. struct cdp_me_ops {
  694. void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc,
  695. uint8_t pdev_id);
  696. void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id);
  697. uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id,
  698. qdf_nbuf_t wbuf, u_int8_t newmac[][6],
  699. uint8_t newmaccnt, uint8_t tid,
  700. bool is_igmp);
  701. };
  702. struct cdp_mon_ops {
  703. QDF_STATUS (*txrx_reset_monitor_mode)
  704. (ol_txrx_soc_handle soc, uint8_t pdev_id, u_int8_t smart_monitor);
  705. QDF_STATUS (*txrx_deliver_tx_mgmt)
  706. (struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf);
  707. /* HK advance monitor filter support */
  708. QDF_STATUS (*txrx_set_advance_monitor_filter)
  709. (struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  710. struct cdp_monitor_filter *filter_val);
  711. /* Configure full monitor mode */
  712. QDF_STATUS
  713. (*config_full_mon_mode)(struct cdp_soc_t *soc, uint8_t val);
  714. };
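/*
 * Illustrative sketch (assumption): pairing the common txrx_set_monitor_mode
 * op with the advance monitor filter op above when enabling monitor mode on a
 * radio. The soc->ops->mon_ops / cmn_drv_ops access paths are assumed, and
 * smart_monitor = 0 is assumed to select plain monitor mode.
 *
 *   static QDF_STATUS example_enable_monitor(struct cdp_soc_t *soc,
 *                                            uint8_t vdev_id, uint8_t pdev_id,
 *                                            struct cdp_monitor_filter *filter)
 *   {
 *       QDF_STATUS status;
 *
 *       status = soc->ops->cmn_drv_ops->txrx_set_monitor_mode(soc, vdev_id, 0);
 *       if (QDF_IS_STATUS_ERROR(status))
 *               return status;
 *
 *       return soc->ops->mon_ops->txrx_set_advance_monitor_filter(soc, pdev_id,
 *                                                                 filter);
 *   }
 */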
  715. struct cdp_host_stats_ops {
  716. int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
  717. struct ol_txrx_stats_req *req);
  718. QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
  719. uint8_t vdev_id);
  720. QDF_STATUS
  721. (*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  722. int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
  723. struct cdp_stats_extd *buf);
  724. /**
  725. * @brief Enable enhanced stats functionality.
  726. *
  727. * @param soc - the soc handle
  728. * @param pdev_id - pdev_id of pdev
  729. * @return - QDF_STATUS
  730. */
  731. QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
  732. uint8_t pdev_id);
  733. /**
  734. * @brief Disable enhanced stats functionality.
  735. *
  736. * @param soc - the soc handle
  737. * @param pdev_id - pdev_id of pdev
  738. * @return - QDF_STATUS
  739. */
  740. QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
  741. uint8_t pdev_id);
  742. QDF_STATUS
  743. (*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  744. QDF_STATUS
  745. (*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  746. QDF_STATUS
  747. (*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  748. QDF_STATUS
  749. (*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  750. QDF_STATUS
  751. (*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  752. QDF_STATUS
  753. (*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  754. QDF_STATUS
  755. (*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  756. QDF_STATUS
  757. (*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
  758. int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  759. struct ol_txrx_stats_req *req);
  760. int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
  761. uint8_t pdev_id,
  762. uint8_t *addr, void *stats,
  763. uint32_t last_tx_rate_mcs,
  764. uint32_t stats_id);
  765. QDF_STATUS
  766. (*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  767. uint8_t *addr,
  768. uint32_t cap, uint32_t copy_stats);
  769. QDF_STATUS
  770. (*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  771. void *data,
  772. uint32_t data_len);
  773. QDF_STATUS
  774. (*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
  775. uint8_t pdev_id, void *data,
  776. uint16_t stats_id);
  777. QDF_STATUS
  778. (*txrx_get_peer_stats_param)(struct cdp_soc_t *soc,
  779. uint8_t vdev_id,
  780. uint8_t *peer_mac,
  781. enum cdp_peer_stats_type type,
  782. cdp_peer_stats_param_t *buf);
  783. QDF_STATUS
  784. (*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  785. uint8_t *peer_mac,
  786. struct cdp_peer_stats *peer_stats);
  787. QDF_STATUS
  788. (*txrx_get_soc_stats)(struct cdp_soc_t *soc,
  789. struct cdp_soc_stats *soc_stats);
  790. QDF_STATUS
  791. (*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
  792. uint8_t vdev_id,
  793. uint8_t *peer_mac);
  794. QDF_STATUS
  795. (*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
  796. uint8_t vdev_id, uint8_t *peer_mac);
  797. int
  798. (*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  799. void *buf, bool is_aggregate);
  800. int
  801. (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
  802. void *data, uint32_t len,
  803. uint32_t stats_id);
  804. int
  805. (*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
  806. uint8_t vdev_id,
  807. wmi_host_vdev_extd_stats *buffer);
  808. QDF_STATUS
  809. (*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
  810. uint8_t vdev_id, void *buf,
  811. uint16_t stats_id);
  812. int
  813. (*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  814. void *buf);
  815. QDF_STATUS
  816. (*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  817. struct cdp_pdev_stats *buf);
  818. int
  819. (*txrx_get_ratekbps)(int preamb, int mcs,
  820. int htflag, int gintval);
  821. QDF_STATUS
  822. (*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  823. uint8_t *peer_mac, void *stats,
  824. uint32_t last_tx_rate_mcs,
  825. uint32_t stats_id);
  826. };
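/*
 * Illustrative sketch (assumption): fetching per-peer statistics through the
 * txrx_get_peer_stats op above into a caller-owned cdp_peer_stats buffer.
 * The soc->ops->host_stats_ops access path is assumed for the example.
 *
 *   static QDF_STATUS example_read_peer_stats(struct cdp_soc_t *soc,
 *                                             uint8_t vdev_id,
 *                                             uint8_t *peer_mac,
 *                                             struct cdp_peer_stats *stats)
 *   {
 *       qdf_mem_zero(stats, sizeof(*stats));
 *       return soc->ops->host_stats_ops->txrx_get_peer_stats(soc, vdev_id,
 *                                                            peer_mac, stats);
 *   }
 */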
  827. struct cdp_wds_ops {
  828. QDF_STATUS
  829. (*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
  830. u_int32_t val);
  831. QDF_STATUS
  832. (*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
  833. uint8_t vdev_id, uint8_t *peer_mac,
  834. int wds_tx_ucast, int wds_tx_mcast);
  835. int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
  836. uint32_t val);
  837. };
  838. struct cdp_raw_ops {
  839. QDF_STATUS
  840. (*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
  841. qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
  842. };
  843. #ifdef PEER_FLOW_CONTROL
  844. struct cdp_pflow_ops {
  845. uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
  846. uint8_t pdev_id,
  847. enum _dp_param_t,
  848. uint32_t, void *);
  849. };
  850. #endif /* PEER_FLOW_CONTROL */
  851. #define LRO_IPV4_SEED_ARR_SZ 5
  852. #define LRO_IPV6_SEED_ARR_SZ 11
  853. /**
  854. * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
  855. * @lro_enable: indicates whether rx_offld is enabled
  856. * @tcp_flag: If the TCP flags from the packet do not match
  857. * the values in this field after masking with TCP flags mask
  858. * below, packet is not rx_offld eligible
  859. * @tcp_flag_mask: field for comparing the TCP values provided
  860. * above with the TCP flags field in the received packet
  861. * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
  862. * 5-tuple toeplitz hash for ipv4 packets
  863. * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
  864. * 5-tuple toeplitz hash for ipv6 packets
  865. */
  866. struct cdp_lro_hash_config {
  867. uint32_t lro_enable;
  868. uint32_t tcp_flag:9,
  869. tcp_flag_mask:9;
  870. uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
  871. uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
  872. };
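/*
 * Illustrative sketch (assumption): how the control path might populate the
 * rx offload hash config above before handing it to the lro_hash_config
 * callback in ol_if_ops. The flag/mask values are placeholders, and the use
 * of qdf_get_random_bytes() to seed the Toeplitz hash is an assumption for
 * this example.
 *
 *   static void example_fill_lro_cfg(struct cdp_lro_hash_config *cfg)
 *   {
 *       cfg->lro_enable = 1;
 *       // Placeholder flag/mask; only packets whose TCP flags match tcp_flag
 *       // under tcp_flag_mask are rx_offld (LRO/GRO) eligible.
 *       cfg->tcp_flag = 0;
 *       cfg->tcp_flag_mask = 0x1ff;
 *       // Random seeds for the IPv4/IPv6 5-tuple Toeplitz flow hash
 *       qdf_get_random_bytes(cfg->toeplitz_hash_ipv4,
 *                            sizeof(cfg->toeplitz_hash_ipv4));
 *       qdf_get_random_bytes(cfg->toeplitz_hash_ipv6,
 *                            sizeof(cfg->toeplitz_hash_ipv6));
 *   }
 */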
  873. struct ol_if_ops {
  874. void
  875. (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  876. uint8_t pdev_id, uint8_t *peer_macaddr,
  877. uint8_t vdev_id,
  878. bool hash_based, uint8_t ring_num);
  879. QDF_STATUS
  880. (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  881. uint8_t pdev_id,
  882. uint8_t vdev_id, uint8_t *peer_mac,
  883. qdf_dma_addr_t hw_qdesc, int tid,
  884. uint16_t queue_num,
  885. uint8_t ba_window_size_valid,
  886. uint16_t ba_window_size);
  887. QDF_STATUS
  888. (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  889. uint8_t pdev_id,
  890. uint8_t vdev_id, uint8_t *peer_macaddr,
  891. uint32_t tid_mask);
  892. int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
  893. uint8_t pdev_id,
  894. uint8_t *peer_mac,
  895. uint8_t *vdev_mac, enum wlan_op_mode opmode);
  896. bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
  897. int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  898. uint8_t vdev_id,
  899. uint8_t *peer_macaddr,
  900. uint16_t peer_id,
  901. const uint8_t *dest_macaddr,
  902. uint8_t *next_node_mac,
  903. uint32_t flags,
  904. uint8_t type);
  905. int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  906. uint8_t vdev_id,
  907. uint8_t *dest_macaddr,
  908. uint8_t *peer_macaddr,
  909. uint32_t flags);
  910. void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  911. uint8_t vdev_id,
  912. uint8_t *wds_macaddr,
  913. uint8_t type,
  914. uint8_t delete_in_fw);
  915. QDF_STATUS
  916. (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
  917. struct cdp_lro_hash_config *rx_offld_hash);
  918. void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
  919. uint8_t type);
  920. #ifdef FEATURE_NAC_RSSI
  921. uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
  922. uint8_t pdev_id, void *msg);
  923. #else
  924. uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
  925. #endif
  926. int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
  927. uint16_t peer_id, uint16_t hw_peer_id,
  928. uint8_t vdev_id, uint8_t *peer_mac_addr,
  929. enum cdp_txrx_ast_entry_type peer_type,
  930. uint32_t tx_ast_hashidx);
  931. int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
  932. uint16_t peer_id,
  933. uint8_t vdev_id);
  934. int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
  935. enum cdp_cfg_param_type param_num);
  936. void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
  937. uint8_t pdev_id,
  938. struct cdp_rx_mic_err_info *info);
  939. bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
  940. uint8_t vdev_id, uint8_t *peer_mac_addr,
  941. qdf_nbuf_t nbuf,
  942. uint16_t hdr_space);
  943. uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
  944. uint8_t pdev_id, uint16_t freq);
  945. uint8_t (*freq_to_band)(struct cdp_ctrl_objmgr_psoc *psoc,
  946. uint8_t pdev_id, uint16_t freq);
  947. #ifdef ATH_SUPPORT_NAC_RSSI
  948. int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
  949. uint8_t pdev_id,
  950. u_int8_t vdev_id,
  951. enum cdp_nac_param_cmd cmd, char *bssid,
  952. char *client_macaddr, uint8_t chan_num);
  953. int
  954. (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
  955. uint8_t pdev_id, u_int8_t vdev_id,
  956. enum cdp_nac_param_cmd cmd,
  957. char *bssid, char *client_mac);
  958. #endif
  959. int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
  960. uint16_t pdev_id, uint8_t *peer_macaddr);
  961. /**
  962. * send_delba() - Send delba to peer
  963. * @psoc: Objmgr soc handle
  964. * @vdev_id: dp vdev id
  965. * @peer_macaddr: Peer mac addr
  966. * @tid: Tid number
  967. * @reason_code: Reason code
  968. * @cdp_rcode: CDP reason code for sending DELBA
  969. *
  970. * Return: 0 for success, non-zero for failure
  971. */
  972. int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
  973. uint8_t *peer_macaddr, uint8_t tid,
  974. uint8_t reason_code, uint8_t cdp_rcode);
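/*
 * Illustrative sketch (assumption): how the data path might invoke this
 * callback when a 2k jump is detected on a TID, using the cdp_delba_rcode
 * values defined near the top of this file. The dp soc layout used to reach
 * ol_ops/ctrl_psoc and the local variables are assumptions for the example.
 *
 *   // tid, vdev_id, peer_mac and reason_code come from the rx error context
 *   if (soc->cdp_soc.ol_ops->send_delba)
 *           soc->cdp_soc.ol_ops->send_delba(soc->ctrl_psoc, vdev_id, peer_mac,
 *                                           tid, reason_code,
 *                                           CDP_DELBA_2K_JUMP);
 */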
  975. int
  976. (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
  977. uint8_t vdev_id,
  978. uint8_t *dest_macaddr,
  979. uint8_t *peer_macaddr,
  980. uint32_t flags);
  981. int
  982. (*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc,
  983. uint8_t *pdev_id,
  984. uint8_t *lmac_id,
  985. uint8_t *target_pdev_id);
  986. bool (*is_roam_inprogress)(uint32_t vdev_id);
  987. enum QDF_GLOBAL_MODE (*get_con_mode)(void);
  988. #ifdef QCA_PEER_MULTIQ_SUPPORT
  989. int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
  990. uint16_t peer_id, uint8_t vdev_id,
  991. uint8_t *peer_mac_addr);
  992. #endif
  993. #ifdef DP_MEM_PRE_ALLOC
  994. void *(*dp_prealloc_get_context)(uint32_t ctxt_type);
  995. QDF_STATUS(*dp_prealloc_put_context)(uint32_t ctxt_type, void *vaddr);
  996. void *(*dp_prealloc_get_consistent)(uint32_t *size,
  997. void **base_vaddr_unaligned,
  998. qdf_dma_addr_t *paddr_unaligned,
  999. qdf_dma_addr_t *paddr_aligned,
  1000. uint32_t align,
  1001. uint32_t ring_type);
  1002. void (*dp_prealloc_put_consistent)(qdf_size_t size,
  1003. void *vaddr_unligned,
  1004. qdf_dma_addr_t paddr);
  1005. void (*dp_get_multi_pages)(uint32_t desc_type,
  1006. size_t element_size,
  1007. uint16_t element_num,
  1008. struct qdf_mem_multi_page_t *pages,
  1009. bool cacheable);
  1010. void (*dp_put_multi_pages)(uint32_t desc_type,
  1011. struct qdf_mem_multi_page_t *pages);
  1012. #endif
  1013. int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
  1014. char *(*get_device_name)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  1015. uint8_t pdev_id);
  1016. QDF_STATUS(*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
  1017. uint8_t vdev_id);
  1018. int (*dp_rx_get_pending)(ol_txrx_soc_handle soc);
  1019. void (*dp_rx_sched_refill_thread)(ol_txrx_soc_handle soc);
  1020. /* TODO: Add any other control path calls required to OL_IF/WMA layer */
  1021. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1022. void (*rx_wds_ext_peer_learn)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  1023. uint16_t peer_id, uint8_t vdev_id,
  1024. uint8_t *peer_macaddr);
  1025. #endif /* QCA_SUPPORT_WDS_EXTENDED */
  1026. #ifdef WLAN_SUPPORT_MESH_LATENCY
  1027. QDF_STATUS(*peer_update_mesh_latency_params)(
  1028. struct cdp_ctrl_objmgr_psoc *psoc,
  1029. uint8_t vdev_id, uint8_t *peer_mac, uint8_t tid,
  1030. uint32_t service_interval_dl, uint32_t burst_size_dl,
  1031. uint32_t service_interval_ul, uint32_t burst_size_ul,
  1032. uint8_t add_or_sub, uint8_t ac);
  1033. #endif
  1034. };
  1035. #ifdef DP_PEER_EXTENDED_API
  1036. /**
  1037. * struct cdp_misc_ops - mcl ops not classified
  1038. * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
  1039. * @set_wmm_param: set wmm parameters
  1040. * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
  1041. * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
  1042. * @hl_tdls_flag_reset: reset tdls flag for vdev
  1043. * @tx_non_std: Allow the control-path SW to send data frames
  1044. * @get_vdev_id: get vdev id
  1045. * @set_wisa_mode: set wisa mode for a vdev
  1046. * @txrx_data_stall_cb_register: register data stall callback
  1047. * @txrx_data_stall_cb_deregister: deregister data stall callback
  1048. * @txrx_post_data_stall_event: post data stall event
  1049. * @runtime_suspend: ensure TXRX is ready to runtime suspend
  1050. * @runtime_resume: ensure TXRX is ready to runtime resume
  1051. * @get_opmode: get operation mode of vdev
  1052. * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
  1053. *	marking the first packet after wow wakeup
  1054. * @update_mac_id: update mac_id for vdev
  1055. * @flush_rx_frames: flush rx frames on the queue
  1056. * @get_intra_bss_fwd_pkts_count: get the total tx and rx packets that
  1057. *	have been forwarded by the txrx layer
  1058. *	without going to upper layers
  1059. * @pkt_log_init: handler to initialize packet log
  1060. * @pkt_log_con_service: handler to connect packet log service
  1061. * @get_num_rx_contexts: handler to get number of RX contexts
  1062. * @register_pktdump_cb: register tx/rx packet-dump callbacks for pktlog
  1063. * @unregister_pktdump_cb: unregister the packet-dump callbacks
  1064. * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
  1065. * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
  1066. *
  1067. * @vdev_inform_ll_conn: inform DP to add/delete a latency critical connection
  1068. * for this particular vdev.
  1069. * @set_swlm_enable: Enable or Disable Software Latency Manager.
  1070. * @is_swlm_enabled: Check if Software latency manager is enabled or not.
  1071. * @display_txrx_hw_info: Dump the DP rings info
  1072. *
  1073. * Function pointers for miscellaneous soc/pdev/vdev related operations.
  1074. */
  1075. struct cdp_misc_ops {
  1076. uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
  1077. uint8_t vdev_id,
  1078. uint16_t timer_value_sec);
  1079. void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1080. struct ol_tx_wmm_param_t wmm_param);
  1081. void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
  1082. uint8_t pdev_id, int enable,
  1083. int period, int txq_limit);
  1084. void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
  1085. uint8_t pdev_id,
  1086. int level, int tput_thresh,
  1087. int tx_limit);
  1088. void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
  1089. uint8_t vdev_id, bool flag);
  1090. qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1091. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
  1092. uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
  1093. uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
  1094. uint8_t vdev_id);
  1095. QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
  1096. uint8_t vdev_id, bool enable);
  1097. QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
  1098. uint8_t pdev_id,
  1099. data_stall_detect_cb cb);
  1100. QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
  1101. uint8_t pdev_id,
  1102. data_stall_detect_cb cb);
  1103. void (*txrx_post_data_stall_event)(
  1104. struct cdp_soc_t *soc_hdl,
  1105. enum data_stall_log_event_indicator indicator,
  1106. enum data_stall_log_event_type data_stall_type,
  1107. uint32_t pdev_id, uint32_t vdev_id_bitmap,
  1108. enum data_stall_log_recovery_type recovery_type);
  1109. QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
  1110. uint8_t pdev_id);
  1111. QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
  1112. uint8_t pdev_id);
  1113. int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1114. void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
  1115. uint8_t pdev_id, uint8_t value);
  1116. void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1117. uint8_t mac_id);
  1118. void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1119. void *peer, bool drop);
  1120. A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
  1121. uint8_t vdev_id,
  1122. uint64_t *fwd_tx_packets,
  1123. uint64_t *fwd_rx_packets);
  1124. void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
  1125. void *scn);
  1126. void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
  1127. uint8_t pdev_id, void *scn);
  1128. int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
  1129. void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1130. ol_txrx_pktdump_cb tx_cb,
  1131. ol_txrx_pktdump_cb rx_cb);
  1132. void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
  1133. uint8_t pdev_id);
  1134. void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
  1135. uint8_t pdev_id);
  1136. void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
  1137. uint8_t vdev_id,
  1138. unsigned long rx_packets,
  1139. uint32_t time_in_ms,
  1140. uint32_t high_th,
  1141. uint32_t low_th);
  1142. void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
  1143. unsigned long tx_bytes,
  1144. uint32_t time_in_ms,
  1145. uint32_t high_th,
  1146. uint32_t low_th);
  1147. void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
  1148. uint8_t pdev_id);
  1149. QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
  1150. uint8_t pdev_id,
  1151. struct cdp_txrx_ext_stats *req);
  1152. QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl,
  1153. uint8_t vdev_id);
  1154. void (*reset_rx_hw_ext_stats)(struct cdp_soc_t *soc_hdl);
  1155. QDF_STATUS (*vdev_inform_ll_conn)(struct cdp_soc_t *soc_hdl,
  1156. uint8_t vdev_id,
  1157. enum vdev_ll_conn_actions action);
  1158. QDF_STATUS (*set_swlm_enable)(struct cdp_soc_t *soc_hdl,
  1159. uint8_t val);
  1160. uint8_t (*is_swlm_enabled)(struct cdp_soc_t *soc_hdl);
  1161. void (*display_txrx_hw_info)(struct cdp_soc_t *soc_hdl);
  1162. };
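/*
 * Illustrative sketch (assumption): gating runtime PM on the data path with
 * the runtime_suspend misc op above, which checks that TXRX is ready to
 * runtime suspend. The soc->ops->misc_ops access path is assumed.
 *
 *   static QDF_STATUS example_runtime_suspend(struct cdp_soc_t *soc,
 *                                             uint8_t pdev_id)
 *   {
 *       QDF_STATUS status;
 *
 *       // Fails if TXRX still has pending work; the caller should then
 *       // abort the runtime suspend attempt.
 *       status = soc->ops->misc_ops->runtime_suspend(soc, pdev_id);
 *       if (QDF_IS_STATUS_ERROR(status))
 *               return status;
 *
 *       return QDF_STATUS_SUCCESS;
 *   }
 */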
  1163. /**
  1164. * struct cdp_ocb_ops - mcl ocb ops
  1165. * @set_ocb_chan_info: set OCB channel info
  1166. * @get_ocb_chan_info: get OCB channel info
  1167. *
  1168. * Function pointers for operations related to OCB.
  1169. */
  1170. struct cdp_ocb_ops {
  1171. void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1172. struct ol_txrx_ocb_set_chan ocb_set_chan);
  1173. struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
  1174. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1175. };
  1176. /**
  1177. * struct cdp_peer_ops - mcl peer related ops
  1178. * @register_peer:
  1179. * @clear_peer:
  1180. * @find_peer_exist:
  1181. * @find_peer_exist_on_vdev:
  1182. * @find_peer_exist_on_other_vdev:
  1183. * @peer_state_update:
  1184. * @get_vdevid:
  1185. * @register_ocb_peer:
  1186. * @peer_get_peer_mac_addr:
  1187. * @get_peer_state:
  1188. * @update_ibss_add_peer_num_of_vdev:
  1189. * @copy_mac_addr_raw:
  1190. * @add_last_real_peer:
  1191. * @is_vdev_restore_last_peer:
  1192. * @update_last_real_peer:
  1193. */
  1194. struct cdp_peer_ops {
  1195. QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1196. struct ol_txrx_desc_type *sta_desc);
  1197. QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1198. struct qdf_mac_addr peer_addr);
  1199. bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1200. uint8_t *peer_addr);
  1201. bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1202. uint8_t *peer_addr);
  1203. bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc,
  1204. uint8_t vdev_id,
  1205. uint8_t *peer_addr,
  1206. uint16_t max_bssid);
  1207. QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc,
  1208. uint8_t *peer_addr,
  1209. enum ol_txrx_peer_state state);
  1210. QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
  1211. uint8_t *vdev_id);
  1212. struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
  1213. struct qdf_mac_addr peer_addr);
  1214. QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
  1215. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  1216. int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1217. uint8_t *peer_mac);
  1218. struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
  1219. int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc,
  1220. uint8_t vdev_id,
  1221. int16_t peer_num_delta);
  1222. void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
  1223. ol_txrx_vdev_peer_remove_cb callback,
  1224. void *callback_context, bool remove_last_peer);
  1225. void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
  1226. ol_txrx_vdev_peer_remove_cb callback,
  1227. void *callback_context);
  1228. void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1229. uint8_t *bss_addr);
  1230. void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1231. uint8_t vdev_id);
  1232. bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc,
  1233. uint8_t vdev_id,
  1234. uint8_t *peer_mac);
  1235. void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1236. uint8_t vdev_id, bool restore_last_peer);
  1237. void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl,
  1238. uint8_t vdev_id, uint8_t *peer_addr);
  1239. void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1240. uint8_t *peer_mac, bool val);
  1241. void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1242. uint8_t *peer_mac, bool val);
  1243. void (*peer_flush_frags)(struct cdp_soc_t *soc_hdl,
  1244. uint8_t vdev_id, uint8_t *peer_mac);
  1245. };
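
/*
 * Illustrative usage sketch (not part of the CDP API): the typical MCL
 * sequence of registering a station peer and then advancing its authorization
 * state. The helper name is hypothetical; sta_desc and the target state are
 * assumed to be prepared by the caller.
 */
static inline QDF_STATUS
cdp_example_peer_attach(struct cdp_peer_ops *peer_ops,
                        struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        struct ol_txrx_desc_type *sta_desc,
                        uint8_t *peer_mac, enum ol_txrx_peer_state state)
{
        QDF_STATUS status;

        if (!peer_ops || !peer_ops->register_peer ||
            !peer_ops->peer_state_update)
                return QDF_STATUS_E_NOSUPPORT;

        /* Create the data-path peer object for this station */
        status = peer_ops->register_peer(soc_hdl, pdev_id, sta_desc);
        if (QDF_IS_STATUS_ERROR(status))
                return status;

        /* Move the peer into the requested state (e.g. connected/authorized) */
        return peer_ops->peer_state_update(soc_hdl, peer_mac, state);
}
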
/**
 * struct cdp_mob_stats_ops - mcl mob stats ops
 * @clear_stats: handler to clear ol txrx stats
 * @stats: handler to fill a caller-provided buffer with ol txrx stats
 */
struct cdp_mob_stats_ops {
        QDF_STATUS (*clear_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint8_t bitmap);
        int (*stats)(uint8_t vdev_id, char *buffer, unsigned int buf_len);
};
/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info: handler to get pn info from peer
 *
 * Function pointers for pmf related operations.
 */
struct cdp_pmf_ops {
        void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
                        uint8_t vdev_id, uint8_t **last_pn_valid,
                        uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
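
/*
 * Illustrative usage sketch (not part of the CDP API): fetching the PN/replay
 * bookkeeping kept for a robust-management-frame peer. The out parameters the
 * op fills are pointers into data-path state, as implied by the double-pointer
 * signature above; the helper name is hypothetical.
 */
static inline bool
cdp_example_peer_pn_snapshot(struct cdp_pmf_ops *pmf_ops,
                             struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                             uint8_t *peer_mac, uint64_t *pn_out,
                             uint32_t *replays_out)
{
        uint8_t *last_pn_valid = NULL;
        uint64_t *last_pn = NULL;
        uint32_t *rmf_pn_replays = NULL;

        if (!pmf_ops || !pmf_ops->get_pn_info)
                return false;

        pmf_ops->get_pn_info(soc_hdl, peer_mac, vdev_id,
                             &last_pn_valid, &last_pn, &rmf_pn_replays);

        if (!last_pn_valid || !*last_pn_valid || !last_pn || !rmf_pn_replays)
                return false;

        /* Copy the values out so the caller does not hold internal pointers */
        *pn_out = *last_pn;
        *replays_out = *rmf_pn_replays;

        return true;
}
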
#endif

#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: hardcode the configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag, 1 enabled, 0 disabled
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 *                                   indicate whether mgmt over wmi is enabled,
 *                                   1 for enabled, 0 for disabled
 * @is_high_latency: check whether the device is a high or low latency device,
 *                   1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag, 1 enabled, 0 disabled
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag, 1 enabled, 0 disabled
 */
struct cdp_cfg_ops {
        void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
                        uint8_t disable_rx_fwd);
        void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
                        uint8_t val);
        struct cdp_cfg *(*cfg_attach)(qdf_device_t osdev, void *cfg_param);
        void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, bool val);
        uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
        void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
        int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
        void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
                        void *param);
        void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
        void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
        void (*set_new_htt_msg_format)(uint8_t val);
        void (*set_peer_unmap_conf_support)(bool val);
        bool (*get_peer_unmap_conf_support)(void);
        void (*set_tx_compl_tsf64)(bool val);
        bool (*get_tx_compl_tsf64)(void);
};
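
/*
 * Illustrative usage sketch (not part of the CDP API): attaching a
 * configuration context and applying a couple of the flags documented above.
 * The helper name is hypothetical; cfg_param is whatever opaque parameter
 * block the caller has already prepared for cfg_attach.
 */
static inline struct cdp_cfg *
cdp_example_cfg_init(struct cdp_cfg_ops *cfg_ops, qdf_device_t osdev,
                     void *cfg_param, uint8_t disable_rx_fwd,
                     uint8_t flow_steering)
{
        struct cdp_cfg *cfg_pdev;

        if (!cfg_ops || !cfg_ops->cfg_attach)
                return NULL;

        /* Create the configuration context */
        cfg_pdev = cfg_ops->cfg_attach(osdev, cfg_param);
        if (!cfg_pdev)
                return NULL;

        /* Apply the caller's overrides on top of the attached defaults */
        if (cfg_ops->set_cfg_rx_fwd_disabled)
                cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, disable_rx_fwd);
        if (cfg_ops->set_flow_steering)
                cfg_ops->set_flow_steering(cfg_pdev, flow_steering);

        return cfg_pdev;
}
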
/**
 * struct cdp_flowctl_ops - mcl flow control
 * @flow_pool_map_handler: handler to map flow_id and pool descriptors
 * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
 * @register_pause_cb: handler to register tx pause callback
 * @set_desc_global_pool_size: handler to set global pool size
 * @dump_flow_pool_info: handler to dump global and flow pool info
 * @tx_desc_thresh_reached: handler to check whether the tx descriptor
 *                          threshold has been reached
 *
 * Function pointers for operations related to flow control
 */
struct cdp_flowctl_ops {
        QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
                        uint8_t pdev_id,
                        uint8_t vdev_id);
        void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
                        uint8_t pdev_id,
                        uint8_t vdev_id);
        QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
                        tx_pause_callback);
        void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
        void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
        bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id);
};
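
/*
 * Illustrative usage sketch (not part of the CDP API): a plausible bring-up
 * order for descriptor-pool flow control - register the pause callback first,
 * then map a flow pool for the vdev. The helper name is hypothetical.
 */
static inline QDF_STATUS
cdp_example_fc_attach_vdev(struct cdp_flowctl_ops *flowctl_ops,
                           struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                           uint8_t vdev_id, tx_pause_callback pause_cb)
{
        QDF_STATUS status;

        if (!flowctl_ops || !flowctl_ops->register_pause_cb ||
            !flowctl_ops->flow_pool_map_handler)
                return QDF_STATUS_E_NOSUPPORT;

        /* Let the data path notify the OS layer about tx pause/resume */
        status = flowctl_ops->register_pause_cb(soc_hdl, pause_cb);
        if (QDF_IS_STATUS_ERROR(status))
                return status;

        /* Allocate/map the per-vdev tx descriptor flow pool */
        return flowctl_ops->flow_pool_map_handler(soc_hdl, pdev_id, vdev_id);
}
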
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare them against the watermark
 * @ll_set_tx_pause_q_depth: Set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
        int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        tx_pause_callback flowcontrol);
        int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, uint32_t chan_freq);
        int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id,
                        enum netif_action_type action);
#else
        int (*register_tx_flow_control)(
                        struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id,
                        ol_txrx_tx_flow_control_fp flowControl,
                        void *osif_fc_ctx,
                        ol_txrx_tx_flow_control_is_pause_fp
                                flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
        int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id);
        void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        bool tx_resume);
        bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        struct qdf_mac_addr peer_addr,
                        unsigned int low_watermark,
                        unsigned int high_watermark_offset);
        int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
                        int pause_q_depth);
        void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
        void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        uint32_t reason, uint32_t pause_type);
        void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        uint32_t reason, uint32_t pause_type);
};
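
/*
 * Illustrative usage sketch (not part of the CDP API): pausing a vdev and
 * flushing any tx packets still queued for it, e.g. ahead of an interface
 * teardown. The helper name is hypothetical; reason/pause_type carry whatever
 * values the caller's flow-control scheme uses.
 */
static inline void
cdp_example_vdev_quiesce(struct cdp_lflowctl_ops *l_flowctl_ops,
                         struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                         uint32_t reason, uint32_t pause_type)
{
        if (!l_flowctl_ops)
                return;

        /* Stop new traffic first ... */
        if (l_flowctl_ops->vdev_pause)
                l_flowctl_ops->vdev_pause(soc_hdl, vdev_id, reason,
                                          pause_type);

        /* ... then drop whatever is still queued on this vdev */
        if (l_flowctl_ops->vdev_flush)
                l_flowctl_ops->vdev_flush(soc_hdl, vdev_id);
}
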
/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
        void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, int period,
                        uint8_t *dutycycle_level);
        void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, int level);
};
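
/*
 * Illustrative usage sketch (not part of the CDP API): programming thermal tx
 * throttling - initialize the throttle period/duty-cycle table once, then
 * select a level as conditions change. The helper name is hypothetical.
 */
static inline void
cdp_example_throttle_setup(struct cdp_throttle_ops *throttle_ops,
                           struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                           int period, uint8_t *dutycycle_level, int level)
{
        if (!throttle_ops)
                return;

        /* Program the base throttle period and duty-cycle table */
        if (throttle_ops->throttle_init_period)
                throttle_ops->throttle_init_period(soc_hdl, pdev_id, period,
                                                   dutycycle_level);

        /* Pick the initial throttle level */
        if (throttle_ops->throttle_set_level)
                throttle_ops->throttle_set_level(soc_hdl, pdev_id, level);
}
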
#endif

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: get IPA Tx/Rx resource information from the data path
 * @ipa_set_doorbell_paddr: set the IPA doorbell register physical address
 * @ipa_set_active: enable/disable the IPA uC data path
 * @ipa_op_response: handle an IPA uC op-code response message
 * @ipa_register_op_cb: register a callback for IPA uC op-code messages
 * @ipa_deregister_op_cb: deregister the IPA uC op-code callback
 * @ipa_get_stat: request IPA uC statistics
 * @ipa_tx_data_frame: transmit an IPA data frame over the WLAN Tx path
 * @ipa_set_uc_tx_partition_base: set the uC Tx partition base
 * @ipa_uc_get_share_stats: get IPA uC shared (WDI metering) statistics
 * @ipa_uc_set_quota: set an IPA uC quota limit
 * @ipa_enable_autonomy: enable the IPA autonomous Rx path
 * @ipa_disable_autonomy: disable the IPA autonomous Rx path
 * @ipa_setup: set up and connect the IPA WDI pipes
 * @ipa_cleanup: disconnect and clean up the IPA WDI pipes
 * @ipa_setup_iface: set up an IPA interface (header/routing) entry
 * @ipa_cleanup_iface: remove an IPA interface entry
 * @ipa_enable_pipes: enable/resume the IPA pipes
 * @ipa_disable_pipes: disable/suspend the IPA pipes
 * @ipa_set_perf_level: vote for an IPA performance level (bandwidth)
 * @ipa_rx_intrabss_fwd: intra-BSS forward an Rx packet received via IPA
 * @ipa_tx_buf_smmu_mapping: create SMMU mappings for Tx buffers shared with IPA
 * @ipa_tx_buf_smmu_unmapping: release SMMU mappings for Tx buffers shared
 *                             with IPA
 */
struct cdp_ipa_ops {
        QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool uc_active, bool is_tx);
        QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint8_t *op_msg);
        QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
                                                  void *osif_ctxt),
                        void *usr_ctxt);
        void (*ipa_deregister_op_cb)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
                        uint8_t vdev_id, qdf_nbuf_t skb);
        void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
                        uint32_t value);
#ifdef FEATURE_METERING
        QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        uint8_t reset_stats);
        QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint64_t quota_bytes);
#endif
        QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
        defined(CONFIG_IPA_WDI_UNIFIED_API)
        QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        void *ipa_i2w_cb, void *ipa_w2i_cb,
                        void *ipa_wdi_meter_notifier_cb,
                        uint32_t ipa_desc_size, void *ipa_priv,
                        bool is_rm_enabled, uint32_t *tx_pipe_handle,
                        uint32_t *rx_pipe_handle, bool is_smmu_enabled,
                        qdf_ipa_sys_connect_params_t *sys_in,
                        bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
        QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        void *ipa_i2w_cb, void *ipa_w2i_cb,
                        void *ipa_wdi_meter_notifier_cb,
                        uint32_t ipa_desc_size, void *ipa_priv,
                        bool is_rm_enabled, uint32_t *tx_pipe_handle,
                        uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
        QDF_STATUS (*ipa_cleanup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint32_t tx_pipe_handle,
                        uint32_t rx_pipe_handle);
        QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
                        qdf_ipa_client_type_t prod_client,
                        qdf_ipa_client_type_t cons_client,
                        uint8_t session_id, bool is_ipv6_enabled);
        QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
        QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_perf_level)(int client,
                        uint32_t max_supported_bw_mbps);
        bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                        qdf_nbuf_t nbuf, bool *fwd_success);
        QDF_STATUS (*ipa_tx_buf_smmu_mapping)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        QDF_STATUS (*ipa_tx_buf_smmu_unmapping)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
};
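
/*
 * Illustrative usage sketch (not part of the CDP API): one plausible enable
 * sequence for the IPA offload pipes - map Tx buffers for SMMU, enable the
 * pipes, then let IPA run autonomously. Both the helper name and the exact
 * ordering are assumptions for illustration only.
 */
static inline QDF_STATUS
cdp_example_ipa_enable(struct cdp_ipa_ops *ipa_ops,
                       struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
        QDF_STATUS status;

        if (!ipa_ops || !ipa_ops->ipa_enable_pipes)
                return QDF_STATUS_E_NOSUPPORT;

        /* Make Tx buffers visible to IPA behind the SMMU, if supported */
        if (ipa_ops->ipa_tx_buf_smmu_mapping) {
                status = ipa_ops->ipa_tx_buf_smmu_mapping(soc_hdl, pdev_id);
                if (QDF_IS_STATUS_ERROR(status))
                        return status;
        }

        /* Bring the WDI pipes out of suspend */
        status = ipa_ops->ipa_enable_pipes(soc_hdl, pdev_id);
        if (QDF_IS_STATUS_ERROR(status))
                return status;

        /* Hand the Rx path over to IPA, if the op is provided */
        return ipa_ops->ipa_enable_autonomy ?
                ipa_ops->ipa_enable_autonomy(soc_hdl, pdev_id) :
                QDF_STATUS_SUCCESS;
}
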
#endif

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointer for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
        void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint32_t *queue_delay_microsec,
                        uint32_t *tx_delay_microsec, int category);
        void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint16_t *bin_values, int category);
        void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        uint16_t *out_packet_count,
                        uint16_t *out_packet_loss_count, int category);
        void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint32_t interval);
};
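
/*
 * Illustrative usage sketch (not part of the CDP API): sampling the average tx
 * delay for one traffic category. The helper name is hypothetical; the two out
 * parameters mirror the tx_delay op above.
 */
static inline bool
cdp_example_sample_tx_delay(struct cdp_tx_delay_ops *delay_ops,
                            struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                            int category, uint32_t *queue_delay_us,
                            uint32_t *transmit_delay_us)
{
        if (!delay_ops || !delay_ops->tx_delay)
                return false;

        /* Fetch queueing delay and transmit delay for this category */
        delay_ops->tx_delay(soc_hdl, pdev_id, queue_delay_us,
                            transmit_delay_us, category);
        return true;
}
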
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
        QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
};
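
/*
 * Illustrative usage sketch (not part of the CDP API): a suspend attempt that
 * rolls back by resuming the data path if the rest of the suspend sequence
 * fails. The helper name and the target_suspend_fn callback are hypothetical.
 */
static inline QDF_STATUS
cdp_example_bus_suspend(struct cdp_bus_ops *bus_ops,
                        struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                        QDF_STATUS (*target_suspend_fn)(void *ctx), void *ctx)
{
        QDF_STATUS status;

        if (!bus_ops || !bus_ops->bus_suspend || !bus_ops->bus_resume)
                return QDF_STATUS_E_NOSUPPORT;

        /* Suspend the data path first */
        status = bus_ops->bus_suspend(soc_hdl, pdev_id);
        if (QDF_IS_STATUS_ERROR(status))
                return status;

        /* If the remaining suspend steps fail, undo the data-path suspend */
        status = target_suspend_fn ? target_suspend_fn(ctx) :
                                     QDF_STATUS_SUCCESS;
        if (QDF_IS_STATUS_ERROR(status))
                bus_ops->bus_resume(soc_hdl, pdev_id);

        return status;
}
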
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb: register the rx offload (LRO/GRO) flush callback
 * @deregister_rx_offld_flush_cb: deregister the rx offload flush callback
 */
struct cdp_rx_offld_ops {
        void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
        void (*deregister_rx_offld_flush_cb)(void);
};
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: Handler to configure host rx monitor status ring
 * @txrx_get_cfr_rcc: Handler to get CFR mode
 * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode
 * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode
 * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode
 * @txrx_enable_mon_reap_timer: Enable/disable reap timer of monitor status ring
 */
struct cdp_cfr_ops {
        void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool enable,
                        struct cdp_monitor_filter *filter_val);
        bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool enable);
        void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        struct cdp_cfr_rcc_stats *buf);
        void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id);
        void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id,
                        bool enable);
};
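
/*
 * Illustrative usage sketch (not part of the CDP API): enabling or disabling
 * CFR capture together with the monitor-status-ring reap timer. The helper
 * name and the pairing of the two ops are assumptions for illustration only.
 */
static inline void
cdp_example_cfr_rcc_toggle(struct cdp_cfr_ops *cfr_ops,
                           struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                           bool enable)
{
        if (!cfr_ops)
                return;

        /* Keep the status-ring reap timer in step with CFR capture */
        if (cfr_ops->txrx_enable_mon_reap_timer)
                cfr_ops->txrx_enable_mon_reap_timer(soc_hdl, pdev_id, enable);

        if (cfr_ops->txrx_set_cfr_rcc)
                cfr_ops->txrx_set_cfr_rcc(soc_hdl, pdev_id, enable);
}
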
#endif

#ifdef WLAN_SUPPORT_MSCS
/**
 * struct cdp_mscs_ops - data path ops for MSCS
 * @mscs_peer_lookup_n_get_priority: look up the MSCS peer entry for a frame
 *                                   and derive its user priority
 */
struct cdp_mscs_ops {
        int (*mscs_peer_lookup_n_get_priority)(struct cdp_soc_t *soc,
                        uint8_t *src_mac,
                        uint8_t *dst_mac,
                        qdf_nbuf_t nbuf);
};
#endif

#ifdef WLAN_SUPPORT_MESH_LATENCY
/**
 * struct cdp_mesh_latency_ops - data path ops for Mesh latency
 * @mesh_latency_update_peer_parameter: update per-peer mesh latency
 *                                      parameters (service interval,
 *                                      burst size, priority)
 */
struct cdp_mesh_latency_ops {
        QDF_STATUS (*mesh_latency_update_peer_parameter)(
                        struct cdp_soc_t *soc,
                        uint8_t *dest_mac, uint32_t service_interval_dl,
                        uint32_t burst_size_dl, uint32_t service_interval_ul,
                        uint32_t burst_size_ul, uint16_t priority,
                        uint8_t add_or_sub);
};
#endif
/*
 * struct cdp_ops - aggregate of the CDP ops tables registered by a data path
 * implementation; optional sub-tables are NULL when the corresponding feature
 * is not compiled in or not provided.
 */
struct cdp_ops {
        struct cdp_cmn_ops *cmn_drv_ops;
        struct cdp_ctrl_ops *ctrl_ops;
        struct cdp_me_ops *me_ops;
        struct cdp_mon_ops *mon_ops;
        struct cdp_host_stats_ops *host_stats_ops;
        struct cdp_wds_ops *wds_ops;
        struct cdp_raw_ops *raw_ops;
        struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
        struct cdp_misc_ops *misc_ops;
        struct cdp_peer_ops *peer_ops;
        struct cdp_ocb_ops *ocb_ops;
        struct cdp_mob_stats_ops *mob_stats_ops;
        struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
        struct cdp_cfg_ops *cfg_ops;
        struct cdp_flowctl_ops *flowctl_ops;
        struct cdp_lflowctl_ops *l_flowctl_ops;
        struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
        struct cdp_bus_ops *bus_ops;
        struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
        struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
        struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
        struct cdp_cfr_ops *cfr_ops;
#endif
#ifdef WLAN_SUPPORT_MSCS
        struct cdp_mscs_ops *mscs_ops;
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
        struct cdp_mesh_latency_ops *mesh_latency_ops;
#endif
};
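
/*
 * Illustrative usage sketch (not part of the CDP API): how a convenience
 * wrapper would dispatch through the aggregate ops table, guarding both the
 * optional sub-table and the individual op before the call. The helper name is
 * hypothetical; it is shown for the DP_PEER_EXTENDED_API sub-table because its
 * members are defined above.
 */
#ifdef DP_PEER_EXTENDED_API
static inline bool
cdp_example_peer_exists(struct cdp_ops *ops, struct cdp_soc_t *soc_hdl,
                        uint8_t pdev_id, uint8_t *peer_addr)
{
        /* Every level of the table is optional, so check each one */
        if (!ops || !ops->peer_ops || !ops->peer_ops->find_peer_exist)
                return false;

        return ops->peer_ops->find_peer_exist(soc_hdl, pdev_id, peer_addr);
}
#endif
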
#endif