/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_ops.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_OPS_H_
#define _CDP_TXRX_CMN_OPS_H_

#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_stats_struct.h>
#include "cdp_txrx_handle.h"
#include <cdp_txrx_mon_struct.h>
#include "wlan_objmgr_psoc_obj.h"

#ifdef IPA_OFFLOAD
#ifdef CONFIG_IPA_WDI_UNIFIED_API
#include <qdf_ipa_wdi3.h>
#else
#include <qdf_ipa.h>
#endif
#endif

/**
 * bitmap values to indicate special handling of peer_delete
 */
#define CDP_PEER_DELETE_NO_SPECIAL		0
#define CDP_PEER_DO_NOT_START_UNMAP_TIMER	1
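
/*
 * Illustrative sketch (not mandated by this header): the values above are
 * typically used as bit positions in the bitmap argument passed to
 * cdp_cmn_ops::txrx_peer_delete().  A caller that wants the data path to
 * skip starting the unmap timer would do something like:
 *
 *	soc->ops->cmn_drv_ops->txrx_peer_delete(peer,
 *			1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER);
 *
 * The exact call-site convention belongs to the caller, not to this file.
 */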
struct hif_opaque_softc;

/* same as ieee80211_nac_param */
enum cdp_nac_param_cmd {
	/* IEEE80211_NAC_PARAM_ADD */
	CDP_NAC_PARAM_ADD = 1,
	/* IEEE80211_NAC_PARAM_DEL */
	CDP_NAC_PARAM_DEL,
	/* IEEE80211_NAC_PARAM_LIST */
	CDP_NAC_PARAM_LIST,
};

/******************************************************************************
 *
 * Control Interface (A Interface)
 *
 *****************************************************************************/
struct cdp_cmn_ops {
	QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
	int (*txrx_pdev_attach_target)(struct cdp_pdev *pdev);
	struct cdp_vdev *(*txrx_vdev_attach)
		(struct cdp_pdev *pdev, uint8_t *vdev_mac_addr,
		 uint8_t vdev_id, enum wlan_op_mode op_mode,
		 enum wlan_op_subtype subtype);
	void (*txrx_vdev_detach)
		(struct cdp_vdev *vdev, ol_txrx_vdev_delete_cb callback,
		 void *cb_context);
	struct cdp_pdev *(*txrx_pdev_attach)
		(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
		 HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id);
	int (*txrx_pdev_post_attach)(struct cdp_pdev *pdev);
	void (*txrx_pdev_pre_detach)(struct cdp_pdev *pdev, int force);
	void (*txrx_pdev_detach)(struct cdp_pdev *pdev, int force);
	/**
	 * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
	 * @pdev: Dp pdev handle
	 * @force: Force deinit or not
	 *
	 * Return: None
	 */
	void (*txrx_pdev_deinit)(struct cdp_pdev *pdev, int force);
	void *(*txrx_peer_create)
		(struct cdp_vdev *vdev, uint8_t *peer_mac_addr,
		 struct cdp_ctrl_objmgr_peer *ctrl_peer);
	void (*txrx_peer_setup)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	void (*txrx_cp_peer_del_response)
		(ol_txrx_soc_handle soc, struct cdp_vdev *vdev_hdl,
		 uint8_t *peer_mac_addr);
	void (*txrx_peer_teardown)
		(struct cdp_vdev *vdev_hdl, void *peer_hdl);
	int (*txrx_peer_add_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
		 uint32_t flags);
	int (*txrx_peer_update_ast)
		(ol_txrx_soc_handle soc, struct cdp_peer *peer_hdl,
		 uint8_t *mac_addr, uint32_t flags);
	bool (*txrx_peer_get_ast_info_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 struct cdp_ast_entry_info *ast_entry_info);
	bool (*txrx_peer_get_ast_info_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info);
	QDF_STATUS (*txrx_peer_ast_delete_by_soc)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 txrx_ast_free_cb callback,
		 void *cookie);
	QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
		(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 txrx_ast_free_cb callback,
		 void *cookie);
	void (*txrx_peer_delete)(void *peer, uint32_t bitmap);
	void (*txrx_vdev_flush_peers)(struct cdp_vdev *vdev, bool unmap_only);
	QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_vdev *vdev,
					    uint8_t smart_monitor);
	void (*txrx_peer_delete_sync)(void *peer,
				      QDF_STATUS(*delete_cb)(
						uint8_t vdev_id,
						uint32_t peerid_cnt,
						uint16_t *peerid_list),
				      uint32_t bitmap);
	void (*txrx_peer_unmap_sync_cb_set)(struct cdp_pdev *pdev,
					    QDF_STATUS(*unmap_resp_cb)(
						uint8_t vdev_id,
						uint32_t peerid_cnt,
						uint16_t *peerid_list));
	uint8_t (*txrx_get_pdev_id_frm_pdev)(struct cdp_pdev *pdev);
	bool (*txrx_get_vow_config_frm_pdev)(struct cdp_pdev *pdev);
	void (*txrx_pdev_set_chan_noise_floor)(struct cdp_pdev *pdev,
					       int16_t chan_noise_floor);
	void (*txrx_set_nac)(struct cdp_peer *peer);
	/**
	 * txrx_set_pdev_tx_capture() - callback to set pdev tx_capture
	 * @pdev: data path pdev handle
	 * @val: value of pdev_tx_capture
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_tx_capture)(struct cdp_pdev *pdev, int val);
	void (*txrx_get_peer_mac_from_peer_id)
		(struct cdp_pdev *pdev_handle,
		 uint32_t peer_id, uint8_t *peer_mac);
	void (*txrx_vdev_tx_lock)(struct cdp_vdev *vdev);
	void (*txrx_vdev_tx_unlock)(struct cdp_vdev *vdev);
	void (*txrx_ath_getstats)(void *pdev,
				  struct cdp_dev_stats *stats, uint8_t type);
	void (*txrx_set_gid_flag)(struct cdp_pdev *pdev, u_int8_t *mem_status,
				  u_int8_t *user_position);
	uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_pdev *pdev);
	void (*txrx_if_mgmt_drain)(void *ni, int force);
	void (*txrx_set_curchan)(struct cdp_pdev *pdev, uint32_t chan_mhz);
	void (*txrx_set_privacy_filters)
		(struct cdp_vdev *vdev, void *filter, uint32_t num);
	uint32_t (*txrx_get_cfg)(void *soc, enum cdp_dp_cfg cfg);

	/********************************************************************
	 * Data Interface (B Interface)
	 ********************************************************************/
	void (*txrx_vdev_register)(struct cdp_vdev *vdev,
				   void *osif_vdev,
				   struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
				   struct ol_txrx_ops *txrx_ops);
	int (*txrx_mgmt_send)(struct cdp_vdev *vdev,
			      qdf_nbuf_t tx_mgmt_frm, uint8_t type);
	int (*txrx_mgmt_send_ext)(struct cdp_vdev *vdev,
				  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
				  uint8_t use_6mbps, uint16_t chanfreq);
	/**
	 * ol_txrx_mgmt_tx_cb - tx management delivery notification
	 * callback function
	 */
	void (*txrx_mgmt_tx_cb_set)(struct cdp_pdev *pdev, uint8_t type,
				    ol_txrx_mgmt_tx_cb download_cb,
				    ol_txrx_mgmt_tx_cb ota_ack_cb,
				    void *ctxt);
	int (*txrx_get_tx_pending)(struct cdp_pdev *pdev);
	/**
	 * ol_txrx_data_tx_cb - Function registered with the data path
	 * that is called when tx frames marked as "no free" are
	 * done being transmitted
	 */
	void (*txrx_data_tx_cb_set)(struct cdp_vdev *data_vdev,
				    ol_txrx_data_tx_cb callback, void *ctxt);

	/*******************************************************************
	 * Statistics and Debugging Interface (C Interface)
	 *******************************************************************/
	int (*txrx_aggr_cfg)(struct cdp_vdev *vdev, int max_subfrms_ampdu,
			     int max_subfrms_amsdu);
	A_STATUS (*txrx_fw_stats_get)(struct cdp_vdev *vdev,
				      struct ol_txrx_stats_req *req,
				      bool per_vdev, bool response_expected);
	int (*txrx_debug)(struct cdp_vdev *vdev, int debug_specs);
	void (*txrx_fw_stats_cfg)(struct cdp_vdev *vdev,
				  uint8_t cfg_stats_type, uint32_t cfg_val);
	void (*txrx_print_level_set)(unsigned level);
	/**
	 * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: vdev mac address
	 */
	uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct
	 * qdf_mac_addr of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to struct qdf_mac_addr
	 */
	struct qdf_mac_addr *
		(*txrx_get_vdev_struct_mac_addr)(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to pdev
	 */
	struct cdp_pdev *(*txrx_get_pdev_from_vdev)
		(struct cdp_vdev *vdev);
	/**
	 * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
	 * @vdev: vdev handle
	 *
	 * Return: Handle to control pdev
	 */
	struct cdp_cfg *
		(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_vdev *vdev);
	/**
	 * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
	 * @pdev: pdev handle
	 *
	 * Return: Handle to vdev
	 */
	struct cdp_vdev *
		(*txrx_get_mon_vdev_from_pdev)(struct cdp_pdev *pdev);
	struct cdp_vdev *
		(*txrx_get_vdev_from_vdev_id)(struct cdp_pdev *pdev,
					      uint8_t vdev_id);
	void (*txrx_soc_detach)(void *soc);
	/**
	 * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 *
	 * Return: None
	 */
	void (*txrx_soc_deinit)(void *soc);
	/**
	 * txrx_soc_init() - Initialize dp soc and dp ring memory
	 * @soc: Opaque Dp handle
	 * @htchdl: Opaque htc handle
	 * @hifhdl: Opaque hif handle
	 *
	 * Return: Opaque DP SOC handle
	 */
	void *(*txrx_soc_init)(void *soc,
			       struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			       struct hif_opaque_softc *hif_handle,
			       HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
			       struct ol_if_ops *ol_ops, uint16_t device_id);
	/**
	 * txrx_tso_soc_attach() - TSO attach handler triggered during
	 * dynamic tso activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_attach)(void *soc);
	/**
	 * txrx_tso_soc_detach() - TSO detach handler triggered during
	 * dynamic tso de-activation
	 * @soc: Opaque Dp handle
	 *
	 * Return: QDF status
	 */
	QDF_STATUS (*txrx_tso_soc_detach)(void *soc);
	int (*addba_resp_tx_completion)(void *peer_handle, uint8_t tid,
					int status);
	int (*addba_requestprocess)(void *peer_handle, uint8_t dialogtoken,
				    uint16_t tid, uint16_t batimeout,
				    uint16_t buffersize,
				    uint16_t startseqnum);
	void (*addba_responsesetup)(void *peer_handle, uint8_t tid,
				    uint8_t *dialogtoken, uint16_t *statuscode,
				    uint16_t *buffersize, uint16_t *batimeout);
	int (*delba_process)(void *peer_handle,
			     int tid, uint16_t reasoncode);
	/**
	 * delba_tx_completion() - Indicate delba tx status
	 * @peer_handle: Peer handle
	 * @tid: Tid number
	 * @status: Tx completion status
	 *
	 * Return: 0 on Success, 1 on failure
	 */
	int (*delba_tx_completion)(void *peer_handle,
				   uint8_t tid, int status);
	void (*set_addba_response)(void *peer_handle,
				   uint8_t tid, uint16_t statuscode);
	uint8_t (*get_peer_mac_addr_frm_id)(struct cdp_soc_t *soc_handle,
					    uint16_t peer_id,
					    uint8_t *mac_addr);
	void (*set_vdev_dscp_tid_map)(struct cdp_vdev *vdev_handle,
				      uint8_t map_id);
	int (*txrx_get_total_per)(struct cdp_pdev *pdev_handle);
	void (*flush_cache_rx_queue)(void);
	void (*set_pdev_dscp_tid_map)(struct cdp_pdev *pdev, uint8_t map_id,
				      uint8_t tos, uint8_t tid);
	void (*hmmc_tid_override_en)(struct cdp_pdev *pdev, bool val);
	void (*set_hmmc_tid_val)(struct cdp_pdev *pdev, uint8_t tid);
	QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
					 uint8_t vdev_id,
					 struct cdp_txrx_stats_req *req);
	QDF_STATUS (*display_stats)(void *psoc, uint16_t value,
				    enum qdf_stats_verbosity_level level);
	void (*txrx_soc_set_nss_cfg)(ol_txrx_soc_handle soc, int config);
	int (*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
	QDF_STATUS (*txrx_intr_attach)(void *soc);
	void (*txrx_intr_detach)(void *soc);
	void (*set_pn_check)(struct cdp_vdev *vdev,
			     struct cdp_peer *peer_handle,
			     enum cdp_sec_type sec_type,
			     uint32_t *rx_pn);
	QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
					       struct cdp_config_params *params);
	void *(*get_dp_txrx_handle)(struct cdp_pdev *pdev_hdl);
	void (*set_dp_txrx_handle)(struct cdp_pdev *pdev_hdl,
				   void *dp_txrx_hdl);
	void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
	void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
				       void *dp_txrx_handle);
	void (*map_pdev_to_lmac)(struct cdp_pdev *pdev_hdl,
				 uint32_t lmac_id);
	void (*set_pdev_status_down)(struct cdp_pdev *pdev_hdl,
				     bool is_pdev_down);
	void (*txrx_peer_reset_ast)
		(ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
		 uint8_t *peer_macaddr, void *vdev_hdl);
	void (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
					  void *vdev_hdl);
	void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
	void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t value);
	void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
					  uint8_t ac, uint32_t *value);
	QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
					   uint32_t num_peers,
					   uint32_t max_ast_index,
					   bool peer_map_unmap_v2);
	void (*txrx_pdev_set_ctrl_pdev)(struct cdp_pdev *pdev_hdl,
					struct cdp_ctrl_objmgr_pdev *ctrl_pdev);
	ol_txrx_tx_fp tx_send;
	/**
	 * txrx_get_os_rx_handles_from_vdev() - Return the rx-delivery
	 * function and osif vdev used to deliver packets to the stack.
	 * @vdev: vdev handle
	 * @stack_fn: filled with the function pointer used to deliver RX
	 * packets to the stack
	 * @osif_vdev: filled with the osif vdev to deliver RX packets to
	 */
	void (*txrx_get_os_rx_handles_from_vdev)
		(struct cdp_vdev *vdev,
		 ol_txrx_rx_fp *stack_fn,
		 ol_osif_vdev_handle *osif_vdev);
	int (*txrx_classify_update)
		(struct cdp_vdev *vdev, qdf_nbuf_t skb,
		 enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
	bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
				    enum cdp_capabilities dp_caps);
	void (*set_rate_stats_ctx)(struct cdp_soc_t *soc, void *ctx);
	void *(*get_rate_stats_ctx)(struct cdp_soc_t *soc);
	void (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
					   struct cdp_pdev *pdev,
					   void *buf);
	void (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
					      struct cdp_pdev *pdev);
	QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_pdev *pdev,
					   uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_pdev_tidmap_prty)(struct cdp_pdev *pdev,
					   uint8_t prty);
	QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_vdev *vdev,
					   uint8_t pcp, uint8_t tid);
	QDF_STATUS (*set_vdev_tidmap_prty)(struct cdp_vdev *vdev,
					   uint8_t prty);
	QDF_STATUS (*set_vdev_tidmap_tbl_id)(struct cdp_vdev *vdev,
					     uint8_t mapid);
#ifdef QCA_MULTIPASS_SUPPORT
	QDF_STATUS (*set_vlan_groupkey)(struct cdp_vdev *vdev_handle,
					uint16_t vlan_id, uint16_t group_key);
#endif
};
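
/*
 * Illustrative sketch of how the host control SW is expected to drive the
 * common ops above during bring-up (attach ordering only; the local
 * variable names and error handling are assumptions, not mandated by this
 * header):
 *
 *	struct cdp_pdev *pdev;
 *	struct cdp_vdev *vdev;
 *
 *	pdev = soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
 *						       htc_hdl, osdev, 0);
 *	if (!pdev || soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev))
 *		goto fail;
 *
 *	vdev = soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev, vdev_mac,
 *						       vdev_id,
 *						       wlan_op_mode_sta,
 *						       wlan_op_subtype_none);
 */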

struct cdp_ctrl_ops {
	int
		(*txrx_mempools_attach)(void *ctrl_pdev);
	int
		(*txrx_set_filter_neighbour_peers)(
			struct cdp_pdev *pdev,
			uint32_t val);
	int
		(*txrx_update_filter_neighbour_peers)(
			struct cdp_vdev *vdev,
			uint32_t cmd, uint8_t *macaddr);
	/**
	 * @brief set the safemode of the device
	 * @details
	 * This flag is used to bypass the encrypt and decrypt processes when
	 * sending and receiving packets. It works like open AUTH mode: HW
	 * treats all packets as non-encrypted frames because no key is
	 * installed. For rx fragmented frames, it bypasses all the rx
	 * defragmentation.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - the safemode state
	 * @return - void
	 */
	void
		(*txrx_set_safemode)(
			struct cdp_vdev *vdev,
			u_int32_t val);
	/**
	 * @brief configure the drop unencrypted frame flag
	 * @details
	 * Rx related. When this flag is set, all the unencrypted frames
	 * received over a secure connection will be discarded.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - flag
	 * @return - void
	 */
	void
		(*txrx_set_drop_unenc)(
			struct cdp_vdev *vdev,
			u_int32_t val);
	/**
	 * @brief set the Tx encapsulation type of the VDEV
	 * @details
	 * This will be used to populate the HTT desc packet type field
	 * during Tx
	 * @param vdev - the data virtual device object
	 * @param val - the Tx encap type
	 * @return - void
	 */
	void
		(*txrx_set_tx_encap_type)(
			struct cdp_vdev *vdev,
			enum htt_cmn_pkt_type val);
	/**
	 * @brief set the Rx decapsulation type of the VDEV
	 * @details
	 * This will be used to configure into firmware and hardware
	 * which format to decap all Rx packets into, for all peers under
	 * the VDEV.
	 * @param vdev - the data virtual device object
	 * @param val - the Rx decap mode
	 * @return - void
	 */
	void
		(*txrx_set_vdev_rx_decap_type)(
			struct cdp_vdev *vdev,
			enum htt_cmn_pkt_type val);
	/**
	 * @brief get the Rx decapsulation type of the VDEV
	 *
	 * @param vdev - the data virtual device object
	 * @return - the Rx decap type
	 */
	enum htt_cmn_pkt_type
		(*txrx_get_vdev_rx_decap_type)(struct cdp_vdev *vdev);
	/* Is this similar to ol_txrx_peer_state_update() in MCL */
	/**
	 * @brief Update the authorize peer object at association time
	 * @details
	 * For the host-based implementation of rate-control, it
	 * updates the peer/node-related parameters within rate-control
	 * context of the peer at association.
	 *
	 * @param peer - pointer to the node's object
	 * @param authorize - either to authorize or unauthorize the peer
	 *
	 * @return none
	 */
	void
		(*txrx_peer_authorize)(struct cdp_peer *peer,
				       u_int32_t authorize);
	/* Should be ol_txrx_ctrl_api.h */
	void (*txrx_set_mesh_mode)(struct cdp_vdev *vdev, u_int32_t val);
	/**
	 * @brief set the mesh rx filter
	 * @details
	 * Packets are dropped based on the bits enabled in the filter.
	 *
	 * @param vdev - the data virtual device object
	 * @param val - value to set
	 */
	void (*txrx_set_mesh_rx_filter)(struct cdp_vdev *vdev, uint32_t val);
	void (*tx_flush_buffers)(struct cdp_vdev *vdev);
	int (*txrx_is_target_ar900b)(struct cdp_vdev *vdev);
	void (*txrx_set_vdev_param)(struct cdp_vdev *vdev,
				    enum cdp_vdev_param_type param,
				    uint32_t val);
	void (*txrx_peer_set_nawds)(struct cdp_peer *peer, uint8_t value);
	/**
	 * @brief Set the reo dest ring num of the radio
	 * @details
	 * Set the reo destination ring number on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @param reo_dest_ring_num - value in the range 1 - 4
	 */
	void (*txrx_set_pdev_reo_dest)(
			struct cdp_pdev *pdev,
			enum cdp_host_reo_dest_ring reo_dest_ring_num);
	/**
	 * @brief Get the reo dest ring num of the radio
	 * @details
	 * Get the reo destination ring number on which we will receive
	 * pkts for this radio.
	 *
	 * @param pdev - the data physical device object
	 * @return the reo destination ring number
	 */
	enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
			struct cdp_pdev *pdev);
	int (*txrx_wdi_event_sub)(struct cdp_pdev *pdev, void *event_cb_sub,
				  uint32_t event);
	int (*txrx_wdi_event_unsub)(struct cdp_pdev *pdev, void *event_cb_sub,
				    uint32_t event);
	int (*txrx_get_sec_type)(struct cdp_peer *peer, uint8_t sec_idx);
	void (*txrx_update_mgmt_txpow_vdev)(struct cdp_vdev *vdev,
					    uint8_t subtype, uint8_t tx_power);
	/**
	 * txrx_set_pdev_param() - callback to set pdev parameter
	 * @pdev: data path pdev handle
	 * @type: pdev parameter type
	 * @val: parameter value
	 *
	 * Return: status: 0 - Success, non-zero: Failure
	 */
	QDF_STATUS (*txrx_set_pdev_param)(struct cdp_pdev *pdev,
					  enum cdp_pdev_param_type type,
					  uint32_t val);
	void * (*txrx_get_pldev)(struct cdp_pdev *pdev);
#ifdef ATH_SUPPORT_NAC_RSSI
	QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_vdev *vdev,
			enum cdp_nac_param_cmd cmd, char *bssid,
			char *client_macaddr, uint8_t chan_num);
	QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_vdev *vdev,
						   char *macaddr,
						   uint8_t *rssi);
#endif
	void (*set_key)(struct cdp_peer *peer_handle,
			bool is_unicast, uint32_t *key);
	uint32_t (*txrx_get_vdev_param)(struct cdp_vdev *vdev,
					enum cdp_vdev_param_type param);
	int (*enable_peer_based_pktlog)(struct cdp_pdev *txrx_pdev_handle,
					char *macaddr, uint8_t enb_dsb);
	void (*calculate_delay_stats)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf);
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
			struct cdp_pdev *txrx_pdev_handle,
			uint32_t protocol_mask, uint16_t protocol_type,
			uint16_t tag);
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	void (*txrx_dump_pdev_rx_protocol_tag_stats)(
			struct cdp_pdev *txrx_pdev_handle,
			uint16_t protocol_type);
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	QDF_STATUS (*txrx_set_rx_flow_tag)(
			struct cdp_pdev *txrx_pdev_handle,
			struct cdp_rx_flow_info *flow_info);
	QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
			struct cdp_pdev *txrx_pdev_handle,
			struct cdp_rx_flow_info *flow_info);
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	void (*txrx_peer_set_vlan_id)(ol_txrx_soc_handle soc,
				      struct cdp_vdev *vdev,
				      uint8_t *peer_mac,
				      uint16_t vlan_id);
#endif
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
			struct cdp_pdev *txrx_pdev_handle,
			bool is_rx_pkt_cap_enable, bool is_tx_pkt_cap_enable,
			uint8_t *peer_mac);
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};

struct cdp_me_ops {
	u_int16_t (*tx_desc_alloc_and_mark_for_mcast_clone)
		(struct cdp_pdev *pdev, u_int16_t buf_count);
	u_int16_t (*tx_desc_free_and_unmark_for_mcast_clone)(
			struct cdp_pdev *pdev,
			u_int16_t buf_count);
	u_int16_t
		(*tx_get_mcast_buf_allocated_marked)
			(struct cdp_pdev *pdev);
	void
		(*tx_me_alloc_descriptor)(struct cdp_pdev *pdev);
	void
		(*tx_me_free_descriptor)(struct cdp_pdev *pdev);
	uint16_t
		(*tx_me_convert_ucast)(struct cdp_vdev *vdev,
				       qdf_nbuf_t wbuf, u_int8_t newmac[][6],
				       uint8_t newmaccnt);
	/* Should be a function pointer in ol_txrx_osif_ops{} */
	/**
	 * @brief notify mcast frame indication from FW.
	 * @details
	 * This notification will be used to convert
	 * multicast frame to unicast.
	 *
	 * @param pdev - handle to the ctrl SW's physical device object
	 * @param vdev_id - ID of the virtual device received the special data
	 * @param msdu - the multicast msdu returned by FW for host inspect
	 */
	int (*mcast_notify)(struct cdp_pdev *pdev,
			    u_int8_t vdev_id, qdf_nbuf_t msdu);
};

struct cdp_mon_ops {
	void (*txrx_monitor_set_filter_ucast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_mcast_data)
		(struct cdp_pdev *, u_int8_t val);
	void (*txrx_monitor_set_filter_non_data)
		(struct cdp_pdev *, u_int8_t val);
	bool (*txrx_monitor_get_filter_ucast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_mcast_data)
		(struct cdp_vdev *vdev_txrx_handle);
	bool (*txrx_monitor_get_filter_non_data)
		(struct cdp_vdev *vdev_txrx_handle);
	QDF_STATUS (*txrx_reset_monitor_mode)(struct cdp_pdev *pdev);
	/* HK advance monitor filter support */
	QDF_STATUS (*txrx_set_advance_monitor_filter)
		(struct cdp_pdev *pdev, struct cdp_monitor_filter *filter_val);
	void (*txrx_monitor_record_channel)
		(struct cdp_pdev *, int val);
	void (*txrx_deliver_tx_mgmt)
		(struct cdp_pdev *pdev, qdf_nbuf_t nbuf);
};

struct cdp_host_stats_ops {
	int (*txrx_host_stats_get)(struct cdp_vdev *vdev,
				   struct ol_txrx_stats_req *req);
	void (*txrx_host_stats_clr)(struct cdp_vdev *vdev);
	void (*txrx_host_ce_stats)(struct cdp_vdev *vdev);
	int (*txrx_stats_publish)(struct cdp_pdev *pdev,
				  struct cdp_stats_extd *buf);
	/**
	 * @brief Enable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_enable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Disable enhanced stats functionality.
	 *
	 * @param pdev - the physical device object
	 * @return - void
	 */
	void (*txrx_disable_enhanced_stats)(struct cdp_pdev *pdev);
	/**
	 * @brief Get the desired stats from the message.
	 *
	 * @param pdev - the physical device object
	 * @param stats_base - stats buffer received from FW
	 * @param type - stats type.
	 * @return - pointer to requested stat identified by type
	 */
	uint32_t * (*txrx_get_stats_base)(struct cdp_pdev *pdev,
					  uint32_t *stats_base,
					  uint32_t msg_len, uint8_t type);
	void
		(*tx_print_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_tso_stats)(struct cdp_vdev *vdev);
	void
		(*tx_print_sg_stats)(struct cdp_vdev *vdev);
	void
		(*tx_rst_sg_stats)(struct cdp_vdev *vdev);
	void
		(*print_rx_cksum_stats)(struct cdp_vdev *vdev);
	void
		(*rst_rx_cksum_stats)(struct cdp_vdev *vdev);
	A_STATUS
		(*txrx_host_me_stats)(struct cdp_vdev *vdev);
	void
		(*txrx_per_peer_stats)(struct cdp_pdev *pdev, char *addr);
	int (*txrx_host_msdu_ttl_stats)(struct cdp_vdev *vdev,
					struct ol_txrx_stats_req *req);
	void
		(*print_lro_stats)(struct cdp_vdev *vdev);
	void
		(*reset_lro_stats)(struct cdp_vdev *vdev);
	void
		(*get_fw_peer_stats)(struct cdp_pdev *pdev, uint8_t *addr,
				     uint32_t cap, uint32_t copy_stats);
	void
		(*get_htt_stats)(struct cdp_pdev *pdev, void *data,
				 uint32_t data_len);
	void
		(*txrx_update_pdev_stats)(struct cdp_pdev *pdev, void *data,
					  uint16_t stats_id);
	struct cdp_peer_stats *
		(*txrx_get_peer_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_ald_stats)(struct cdp_peer *peer);
	void
		(*txrx_reset_peer_stats)(struct cdp_peer *peer);
	int
		(*txrx_get_vdev_stats)(struct cdp_vdev *vdev, void *buf,
				       bool is_aggregate);
	int
		(*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
						    void *data, uint32_t len,
						    uint32_t stats_id);
	int
		(*txrx_get_vdev_extd_stats)(struct cdp_vdev *vdev_handle,
					    void *buffer);
	void
		(*txrx_update_vdev_stats)(struct cdp_vdev *vdev, void *buf,
					  uint16_t stats_id);
	int
		(*txrx_get_radio_stats)(struct cdp_pdev *pdev,
					void *buf);
	struct cdp_pdev_stats *
		(*txrx_get_pdev_stats)(struct cdp_pdev *pdev);
	int
		(*txrx_get_ratekbps)(int preamb, int mcs,
				     int htflag, int gintval);
	void
		(*configure_rate_stats)(struct cdp_soc_t *soc,
					uint8_t val);
};

struct cdp_wds_ops {
	void
		(*txrx_set_wds_rx_policy)(struct cdp_vdev *vdev,
					  u_int32_t val);
	void
		(*txrx_wds_peer_tx_policy_update)(struct cdp_peer *peer,
						  int wds_tx_ucast,
						  int wds_tx_mcast);
	int (*vdev_set_wds)(void *vdev, uint32_t val);
};

struct cdp_raw_ops {
	int (*txrx_get_nwifi_mode)(struct cdp_vdev *vdev);
	void (*rsim_get_astentry)(struct cdp_vdev *vdev,
				  qdf_nbuf_t *pnbuf,
				  struct cdp_raw_ast *raw_ast);
};

#ifdef PEER_FLOW_CONTROL
struct cdp_pflow_ops {
	uint32_t (*pflow_update_pdev_params)(void *,
					     enum _ol_ath_param_t,
					     uint32_t, void *);
};
#endif /* PEER_FLOW_CONTROL */

#define LRO_IPV4_SEED_ARR_SZ 5
#define LRO_IPV6_SEED_ARR_SZ 11

/**
 * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
 * @lro_enable: indicates whether rx_offld is enabled
 * @tcp_flag: If the TCP flags from the packet do not match
 * the values in this field after masking with TCP flags mask
 * below, packet is not rx_offld eligible
 * @tcp_flag_mask: field for comparing the TCP values provided
 * above with the TCP flags field in the received packet
 * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv4 packets
 * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
 * 5-tuple toeplitz hash for ipv6 packets
 */
struct cdp_lro_hash_config {
	uint32_t lro_enable;
	uint32_t tcp_flag:9,
		 tcp_flag_mask:9;
	uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
	uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
};
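
/*
 * Illustrative sketch of how a control-plane caller might populate this
 * structure before handing it to ol_if_ops::lro_hash_config() below.  The
 * particular flag choice and the use of the qdf helpers are assumptions
 * made for the example, not requirements of this header:
 *
 *	struct cdp_lro_hash_config cfg = {0};
 *
 *	cfg.lro_enable = 1;
 *	cfg.tcp_flag = QDF_TCPHDR_ACK;
 *	cfg.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
 *			    QDF_TCPHDR_RST | QDF_TCPHDR_ACK |
 *			    QDF_TCPHDR_URG | QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
 *	qdf_get_random_bytes(cfg.toeplitz_hash_ipv4,
 *			     sizeof(cfg.toeplitz_hash_ipv4));
 *	qdf_get_random_bytes(cfg.toeplitz_hash_ipv6,
 *			     sizeof(cfg.toeplitz_hash_ipv6));
 */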

struct ol_if_ops {
	void
		(*peer_set_default_routing)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
					    uint8_t *peer_macaddr,
					    uint8_t vdev_id,
					    bool hash_based, uint8_t ring_num);
	QDF_STATUS
		(*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
					       uint8_t vdev_id,
					       uint8_t *peer_mac,
					       qdf_dma_addr_t hw_qdesc,
					       int tid, uint16_t queue_num,
					       uint8_t ba_window_size_valid,
					       uint16_t ba_window_size);
	QDF_STATUS
		(*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
						uint8_t vdev_id,
						uint8_t *peer_macaddr,
						uint32_t tid_mask);
	int (*peer_unref_delete)(void *scn_handle, uint8_t *peer_mac,
				 uint8_t *vdev_mac, enum wlan_op_mode opmode,
				 void *old_peer, void *new_peer);
	bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
	int (*peer_add_wds_entry)(void *vdev_handle,
				  struct cdp_peer *peer_handle,
				  const uint8_t *dest_macaddr,
				  uint8_t *next_node_mac,
				  uint32_t flags);
	int (*peer_update_wds_entry)(void *ol_soc_handle,
				     uint8_t *dest_macaddr,
				     uint8_t *peer_macaddr,
				     uint32_t flags);
	void (*peer_del_wds_entry)(void *ol_soc_handle,
				   uint8_t *wds_macaddr,
				   uint8_t type);
	QDF_STATUS
		(*lro_hash_config)(struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
				   struct cdp_lro_hash_config *rx_offld_hash);
	void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
				uint8_t type);
#ifdef FEATURE_NAC_RSSI
	uint8_t (*rx_invalid_peer)(void *ctrl_pdev, void *msg);
#else
	uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
#endif
	int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
			      uint16_t peer_id, uint16_t hw_peer_id,
			      uint8_t vdev_id, uint8_t *peer_mac_addr,
			      enum cdp_txrx_ast_entry_type peer_type,
			      uint32_t tx_ast_hashidx);
	int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
				uint16_t peer_id,
				uint8_t vdev_id);
	int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
				enum cdp_cfg_param_type param_num);
	void (*rx_mic_error)(void *ol_soc_handle,
			     struct cdp_rx_mic_err_info *info);
	bool (*rx_frag_tkip_demic)(struct wlan_objmgr_peer *ctrl_peer,
				   qdf_nbuf_t nbuf,
				   uint16_t hdr_space);
	uint8_t (*freq_to_channel)(void *ol_soc_handle, uint16_t vdev_id);
	void (*record_act_change)(struct wlan_objmgr_pdev *pdev,
				  u_int8_t *dstmac, bool active);
#ifdef ATH_SUPPORT_NAC_RSSI
	int (*config_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
				      u_int8_t vdev_id,
				      enum cdp_nac_param_cmd cmd, char *bssid,
				      char *client_macaddr, uint8_t chan_num);
	int (*config_bssid_in_fw_for_nac_rssi)(struct wlan_objmgr_pdev *pdev,
					       u_int8_t vdev_id,
					       enum cdp_nac_param_cmd cmd,
					       char *bssid, char *client_mac);
#endif
	int (*peer_sta_kickout)(void *ctrl_pdev, uint8_t *peer_macaddr);
	/**
	 * send_delba() - Send delba to peer
	 * @pdev_handle: Dp pdev handle
	 * @ctrl_peer: Peer handle
	 * @peer_macaddr: Peer mac addr
	 * @tid: Tid number
	 *
	 * Return: 0 for success, non-zero for failure
	 */
	int (*send_delba)(void *pdev_handle, void *ctrl_peer,
			  uint8_t *peer_macaddr, uint8_t tid,
			  void *vdev_handle, uint8_t reason_code);
	int (*peer_delete_multiple_wds_entries)(void *vdev_handle,
						uint8_t *dest_macaddr,
						uint8_t *peer_macaddr,
						uint32_t flags);
	bool (*is_roam_inprogress)(uint32_t vdev_id);
	enum QDF_GLOBAL_MODE (*get_con_mode)(void);
	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
};
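
/*
 * Note/illustration: unlike the cdp_*_ops structures, which the OS and
 * control layer call *into* the data path, ol_if_ops are callbacks the data
 * path makes back into the control layer (OL_IF/WMA).  They are handed to
 * the data path at soc creation time, e.g. through the ol_ops argument of
 * cdp_cmn_ops::txrx_soc_init(), and invoked from the DP roughly like this
 * (sketch only; the surrounding names are assumptions for illustration):
 *
 *	if (soc->ol_ops && soc->ol_ops->peer_map_event)
 *		soc->ol_ops->peer_map_event(ctrl_psoc, peer_id, hw_peer_id,
 *					    vdev_id, peer_mac_addr,
 *					    peer_type, ast_hash);
 */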

#ifdef DP_PEER_EXTENDED_API
/**
 * struct cdp_misc_ops - mcl ops not classified
 * @set_ibss_vdev_heart_beat_timer:
 * @bad_peer_txctl_set_setting:
 * @bad_peer_txctl_update_threshold:
 * @hl_tdls_flag_reset:
 * @tx_non_std:
 * @get_vdev_id:
 * @set_wisa_mode:
 * @txrx_data_stall_cb_register:
 * @txrx_data_stall_cb_deregister:
 * @txrx_post_data_stall_event:
 * @runtime_suspend:
 * @runtime_resume:
 * @register_pktdump_cb:
 * @unregister_pktdump_cb:
 * @pdev_reset_driver_del_ack:
 * @vdev_set_driver_del_ack_enable:
 */
struct cdp_misc_ops {
	uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_vdev *vdev,
						   uint16_t timer_value_sec);
	void (*set_wmm_param)(struct cdp_pdev *cfg_pdev,
			      struct ol_tx_wmm_param_t wmm_param);
	void (*bad_peer_txctl_set_setting)(struct cdp_pdev *pdev, int enable,
					   int period, int txq_limit);
	void (*bad_peer_txctl_update_threshold)(struct cdp_pdev *pdev,
						int level, int tput_thresh,
						int tx_limit);
	void (*hl_tdls_flag_reset)(struct cdp_vdev *vdev, bool flag);
	qdf_nbuf_t (*tx_non_std)(struct cdp_vdev *vdev,
				 enum ol_tx_spec tx_spec,
				 qdf_nbuf_t msdu_list);
	uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
	uint32_t (*get_tx_ack_stats)(struct cdp_pdev *pdev, uint8_t vdev_id);
	QDF_STATUS (*set_wisa_mode)(struct cdp_vdev *vdev, bool enable);
	QDF_STATUS (*txrx_data_stall_cb_register)(data_stall_detect_cb cb);
	QDF_STATUS (*txrx_data_stall_cb_deregister)(data_stall_detect_cb cb);
	void (*txrx_post_data_stall_event)(
			enum data_stall_log_event_indicator indicator,
			enum data_stall_log_event_type data_stall_type,
			uint32_t pdev_id, uint32_t vdev_id_bitmap,
			enum data_stall_log_recovery_type recovery_type);
	QDF_STATUS (*runtime_suspend)(struct cdp_pdev *pdev);
	QDF_STATUS (*runtime_resume)(struct cdp_pdev *pdev);
	int (*get_opmode)(struct cdp_vdev *vdev);
	void (*mark_first_wakeup_packet)(uint8_t value);
	void (*update_mac_id)(uint8_t vdev_id, uint8_t mac_id);
	void (*flush_rx_frames)(void *peer, bool drop);
	A_STATUS (*get_intra_bss_fwd_pkts_count)(uint8_t vdev_id,
						 uint64_t *fwd_tx_packets,
						 uint64_t *fwd_rx_packets);
	void (*pkt_log_init)(struct cdp_pdev *handle, void *scn);
	void (*pkt_log_con_service)(struct cdp_pdev *pdev, void *scn);
	int (*get_num_rx_contexts)(struct cdp_soc_t *soc);
	void (*register_pktdump_cb)(ol_txrx_pktdump_cb tx_cb,
				    ol_txrx_pktdump_cb rx_cb);
	void (*unregister_pktdump_cb)(void);
	void (*pdev_reset_driver_del_ack)(struct cdp_pdev *ppdev);
	void (*vdev_set_driver_del_ack_enable)(uint8_t vdev_id,
					       unsigned long rx_packets,
					       uint32_t time_in_ms,
					       uint32_t high_th,
					       uint32_t low_th);
};

/**
 * struct cdp_ocb_ops - mcl ocb ops
 * @set_ocb_chan_info:
 * @get_ocb_chan_info:
 */
struct cdp_ocb_ops {
	void (*set_ocb_chan_info)(struct cdp_vdev *vdev,
				  struct ol_txrx_ocb_set_chan ocb_set_chan);
	struct ol_txrx_ocb_chan_info *
		(*get_ocb_chan_info)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_peer_ops - mcl peer related ops
 * @register_peer:
 * @clear_peer:
 * @cfg_attach:
 * @find_peer_by_addr:
 * @find_peer_by_addr_and_vdev:
 * @local_peer_id:
 * @peer_find_by_local_id:
 * @peer_state_update:
 * @get_vdevid:
 * @get_vdev_by_sta_id:
 * @register_ocb_peer:
 * @peer_get_peer_mac_addr:
 * @get_peer_state:
 * @get_vdev_for_peer:
 * @update_ibss_add_peer_num_of_vdev:
 * @remove_peers_for_vdev:
 * @remove_peers_for_vdev_no_lock:
 * @copy_mac_addr_raw:
 * @add_last_real_peer:
 * @is_vdev_restore_last_peer:
 * @update_last_real_peer:
 */
struct cdp_peer_ops {
	QDF_STATUS (*register_peer)(struct cdp_pdev *pdev,
				    struct ol_txrx_desc_type *sta_desc);
	QDF_STATUS (*clear_peer)(struct cdp_pdev *pdev,
				 struct qdf_mac_addr peer_addr);
	QDF_STATUS (*change_peer_state)(uint8_t sta_id,
					enum ol_txrx_peer_state sta_state,
					bool roam_synch_in_progress);
	void * (*peer_get_ref_by_addr)(struct cdp_pdev *pdev,
				       uint8_t *peer_addr, uint8_t *peer_id,
				       enum peer_debug_id_type debug_id);
	void (*peer_release_ref)(void *peer, enum peer_debug_id_type debug_id);
	void * (*find_peer_by_addr)(struct cdp_pdev *pdev,
				    uint8_t *peer_addr, uint8_t *peer_id);
	void * (*find_peer_by_addr_and_vdev)(struct cdp_pdev *pdev,
					     struct cdp_vdev *vdev,
					     uint8_t *peer_addr,
					     uint8_t *peer_id);
	uint16_t (*local_peer_id)(void *peer);
	void * (*peer_find_by_local_id)(struct cdp_pdev *pdev,
					uint8_t local_peer_id);
	QDF_STATUS (*peer_state_update)(struct cdp_pdev *pdev,
					uint8_t *peer_addr,
					enum ol_txrx_peer_state state);
	QDF_STATUS (*get_vdevid)(void *peer, uint8_t *vdev_id);
	struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
						   struct qdf_mac_addr peer_addr);
	QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr, uint8_t *peer_id);
	uint8_t * (*peer_get_peer_mac_addr)(void *peer);
	int (*get_peer_state)(void *peer);
	struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
	int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_vdev *vdev,
						    int16_t peer_num_delta);
	void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
				      ol_txrx_vdev_peer_remove_cb callback,
				      void *callback_context,
				      bool remove_last_peer);
	void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
					      ol_txrx_vdev_peer_remove_cb callback,
					      void *callback_context);
	void (*copy_mac_addr_raw)(struct cdp_vdev *vdev, uint8_t *bss_addr);
	void (*add_last_real_peer)(struct cdp_pdev *pdev,
				   struct cdp_vdev *vdev, uint8_t *peer_id);
	bool (*is_vdev_restore_last_peer)(void *peer);
	void (*update_last_real_peer)(struct cdp_pdev *pdev, void *vdev,
				      uint8_t *peer_id,
				      bool restore_last_peer);
	void (*peer_detach_force_delete)(void *peer);
	void (*set_tdls_offchan_enabled)(void *peer, bool val);
	void (*set_peer_as_tdls_peer)(void *peer, bool val);
};

/**
 * struct cdp_mob_stats_ops - mcl stats ops
 * @clear_stats:
 * @stats:
 */
struct cdp_mob_stats_ops {
	QDF_STATUS
		(*clear_stats)(struct cdp_soc *soc, uint8_t bitmap);
	int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
};

/**
 * struct cdp_pmf_ops - mcl protected management frame ops
 * @get_pn_info:
 */
struct cdp_pmf_ops {
	void (*get_pn_info)(void *peer, uint8_t **last_pn_valid,
			    uint64_t **last_pn, uint32_t **rmf_pn_replays);
};
#endif

#ifdef DP_FLOW_CTL
/**
 * struct cdp_cfg_ops - mcl configuration ops
 * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
 * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
 * @cfg_attach: hardcode the configuration parameters
 * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
 * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
 * 1 enabled, 0 disabled.
 * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
 * indicate whether mgmt over wmi is enabled,
 * 1 for enabled, 0 for disabled
 * @is_high_latency: get whether the device is a high or low latency device,
 * 1 high latency bus, 0 low latency bus
 * @set_flow_control_parameters: set flow control parameters
 * @set_flow_steering: set flow_steering_enabled flag
 * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
 * @set_new_htt_msg_format: set new_htt_msg_format flag
 * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
 * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
 * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
 * 1 enabled, 0 disabled.
 * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
 * 1 enabled, 0 disabled.
 */
struct cdp_cfg_ops {
	void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
					uint8_t disable_rx_fwd);
	void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
					   uint8_t val);
	struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
	void (*vdev_rx_set_intrabss_fwd)(struct cdp_vdev *vdev, bool val);
	uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
	void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
	int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
	void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
					    void *param);
	void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
	void (*set_new_htt_msg_format)(uint8_t val);
	void (*set_peer_unmap_conf_support)(bool val);
	bool (*get_peer_unmap_conf_support)(void);
	void (*set_tx_compl_tsf64)(bool val);
	bool (*get_tx_compl_tsf64)(void);
};

/**
 * struct cdp_flowctl_ops - mcl flow control
 * @register_pause_cb:
 * @set_desc_global_pool_size:
 * @dump_flow_pool_info:
 */
struct cdp_flowctl_ops {
	QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
					    struct cdp_pdev *pdev,
					    uint8_t vdev_id);
	void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
					struct cdp_pdev *pdev,
					uint8_t vdev_id);
	QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
					tx_pause_callback);
	void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
	void (*dump_flow_pool_info)(void *);
	bool (*tx_desc_thresh_reached)(struct cdp_vdev *vdev);
};

/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control:
 * @deregister_tx_flow_control_cb:
 * @flow_control_cb:
 * @get_tx_resource:
 * @ll_set_tx_pause_q_depth:
 * @vdev_flush:
 * @vdev_pause:
 * @vdev_unpause:
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
	int (*register_tx_flow_control)(struct cdp_soc_t *soc,
					tx_pause_callback flowcontrol);
	int (*set_vdev_tx_desc_limit)(uint8_t vdev_id, uint8_t chan);
	int (*set_vdev_os_queue_status)(uint8_t vdev_id,
					enum netif_action_type action);
#else
	int (*register_tx_flow_control)(uint8_t vdev_id,
			ol_txrx_tx_flow_control_fp flowControl,
			void *osif_fc_ctx,
			ol_txrx_tx_flow_control_is_pause_fp
			flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
	int (*deregister_tx_flow_control_cb)(uint8_t vdev_id);
	void (*flow_control_cb)(struct cdp_vdev *vdev, bool tx_resume);
	bool (*get_tx_resource)(struct cdp_pdev *pdev,
				struct qdf_mac_addr peer_addr,
				unsigned int low_watermark,
				unsigned int high_watermark_offset);
	int (*ll_set_tx_pause_q_depth)(uint8_t vdev_id, int pause_q_depth);
	void (*vdev_flush)(struct cdp_vdev *vdev);
	void (*vdev_pause)(struct cdp_vdev *vdev, uint32_t reason,
			   uint32_t pause_type);
	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason,
			     uint32_t pause_type);
};

/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period:
 * @throttle_set_level:
 */
struct cdp_throttle_ops {
	void (*throttle_init_period)(struct cdp_pdev *pdev, int period,
				     uint8_t *dutycycle_level);
	void (*throttle_set_level)(struct cdp_pdev *pdev, int level);
};
#endif

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource:
 * @ipa_set_doorbell_paddr:
 * @ipa_set_active:
 * @ipa_op_response:
 * @ipa_register_op_cb:
 * @ipa_get_stat:
 * @ipa_tx_data_frame:
 */
struct cdp_ipa_ops {
	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
				     bool is_tx);
	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
			void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
						  void *osif_ctxt),
			void *usr_ctxt);
	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
					     uint32_t value);
#ifdef FEATURE_METERING
	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
					     uint8_t reset_stats);
	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
				       uint64_t quota_bytes);
#endif
	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
				void *ipa_w2i_cb,
				void *ipa_wdi_meter_notifier_cb,
				uint32_t ipa_desc_size, void *ipa_priv,
				bool is_rm_enabled,
				uint32_t *tx_pipe_handle,
				uint32_t *rx_pipe_handle,
				bool is_smmu_enabled,
				qdf_ipa_sys_connect_params_t *sys_in,
				bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
				void *ipa_w2i_cb,
				void *ipa_wdi_meter_notifier_cb,
				uint32_t ipa_desc_size, void *ipa_priv,
				bool is_rm_enabled,
				uint32_t *tx_pipe_handle,
				uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
				  uint32_t rx_pipe_handle);
	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
				      qdf_ipa_client_type_t prod_client,
				      qdf_ipa_client_type_t cons_client,
				      uint8_t session_id,
				      bool is_ipv6_enabled);
	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
	QDF_STATUS (*ipa_set_perf_level)(int client,
					 uint32_t max_supported_bw_mbps);
	bool (*ipa_rx_intrabss_fwd)(struct cdp_vdev *vdev, qdf_nbuf_t nbuf,
				    bool *fwd_success);
};
#endif

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay:
 * @tx_delay_hist:
 * @tx_packet_count:
 * @tx_set_compute_interval:
 */
struct cdp_tx_delay_ops {
	void (*tx_delay)(struct cdp_pdev *pdev,
			 uint32_t *queue_delay_microsec,
			 uint32_t *tx_delay_microsec, int category);
	void (*tx_delay_hist)(struct cdp_pdev *pdev,
			      uint16_t *bin_values, int category);
	void (*tx_packet_count)(struct cdp_pdev *pdev,
				uint16_t *out_packet_count,
				uint16_t *out_packet_loss_count,
				int category);
	void (*tx_set_compute_interval)(struct cdp_pdev *pdev,
					uint32_t interval);
};

/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend:
 * @bus_resume:
 */
struct cdp_bus_ops {
	QDF_STATUS (*bus_suspend)(struct cdp_pdev *opaque_pdev);
	QDF_STATUS (*bus_resume)(struct cdp_pdev *opaque_pdev);
};
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb:
 * @deregister_rx_offld_flush_cb:
 */
struct cdp_rx_offld_ops {
	void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
	void (*deregister_rx_offld_flush_cb)(void);
};
#endif

struct cdp_ops {
	struct cdp_cmn_ops *cmn_drv_ops;
	struct cdp_ctrl_ops *ctrl_ops;
	struct cdp_me_ops *me_ops;
	struct cdp_mon_ops *mon_ops;
	struct cdp_host_stats_ops *host_stats_ops;
	struct cdp_wds_ops *wds_ops;
	struct cdp_raw_ops *raw_ops;
	struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
	struct cdp_misc_ops *misc_ops;
	struct cdp_peer_ops *peer_ops;
	struct cdp_ocb_ops *ocb_ops;
	struct cdp_mob_stats_ops *mob_stats_ops;
	struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
	struct cdp_cfg_ops *cfg_ops;
	struct cdp_flowctl_ops *flowctl_ops;
	struct cdp_lflowctl_ops *l_flowctl_ops;
	struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
	struct cdp_bus_ops *bus_ops;
	struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
	struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
	struct cdp_rx_offld_ops *rx_offld_ops;
#endif
};
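
/*
 * Illustrative sketch of the dispatch pattern used by the cdp_* wrapper
 * APIs (see cdp_txrx_cmn.h and friends): callers reach the registered ops
 * through the cdp_ops pointer held in struct cdp_soc_t and NULL-check every
 * hop before invoking.  A minimal example, assuming soc->ops has been
 * populated by the data-path implementation (the wrapper name below is
 * made up for illustration):
 *
 *	static inline QDF_STATUS
 *	example_soc_attach_target(ol_txrx_soc_handle soc)
 *	{
 *		if (!soc || !soc->ops || !soc->ops->cmn_drv_ops ||
 *		    !soc->ops->cmn_drv_ops->txrx_soc_attach_target)
 *			return QDF_STATUS_E_INVAL;
 *
 *		return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
 *	}
 */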
#endif