  1. /*
  2. * Copyright (c) 2017-2020 The Linux Foundation. All rights reserved.
  3. *
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /**
  20. * @file cdp_txrx_ops.h
  21. * @brief Define the host data path converged API functions
  22. * called by the host control SW and the OS interface module
  23. */
  24. #ifndef _CDP_TXRX_CMN_OPS_H_
  25. #define _CDP_TXRX_CMN_OPS_H_
  26. #include <cdp_txrx_cmn_struct.h>
  27. #include <cdp_txrx_stats_struct.h>
  28. #include "cdp_txrx_handle.h"
  29. #include <cdp_txrx_mon_struct.h>
  30. #include "wlan_objmgr_psoc_obj.h"
  31. #include <wmi_unified_api.h>
  32. #include <wdi_event_api.h>
  33. #ifdef IPA_OFFLOAD
  34. #ifdef CONFIG_IPA_WDI_UNIFIED_API
  35. #include <qdf_ipa_wdi3.h>
  36. #else
  37. #include <qdf_ipa.h>
  38. #endif
  39. #endif
  40. /**
  41. * bitmap values to indicate special handling of peer_delete
  42. */
  43. #define CDP_PEER_DELETE_NO_SPECIAL 0
  44. #define CDP_PEER_DO_NOT_START_UNMAP_TIMER 1
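/*
 * A minimal sketch of how a caller might build the peer-delete bitmap used by
 * the peer delete hooks below (e.g. txrx_peer_delete). Treating the macros as
 * bit positions is an assumption based on their names and default value.
 */
#if 0
uint32_t bitmap = CDP_PEER_DELETE_NO_SPECIAL;           /* default: no special handling */

bitmap |= 1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER;       /* skip the unmap timer */
#endif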
  45. struct hif_opaque_softc;
  46. /* same as ieee80211_nac_param */
  47. enum cdp_nac_param_cmd {
  48. /* IEEE80211_NAC_PARAM_ADD */
  49. CDP_NAC_PARAM_ADD = 1,
  50. /* IEEE80211_NAC_PARAM_DEL */
  51. CDP_NAC_PARAM_DEL,
  52. /* IEEE80211_NAC_PARAM_LIST */
  53. CDP_NAC_PARAM_LIST,
  54. };
  55. /**
  56. * enum vdev_peer_protocol_enter_exit - whether ingress or egress
  57. * @CDP_VDEV_PEER_PROTOCOL_IS_INGRESS: ingress
  58. * @CDP_VDEV_PEER_PROTOCOL_IS_EGRESS: egress
  59. *
  60. * whether ingress or egress
  61. */
  62. enum vdev_peer_protocol_enter_exit {
  63. CDP_VDEV_PEER_PROTOCOL_IS_INGRESS,
  64. CDP_VDEV_PEER_PROTOCOL_IS_EGRESS
  65. };
  66. /**
  67. * enum vdev_peer_protocol_tx_rx - whether tx or rx
  68. * @CDP_VDEV_PEER_PROTOCOL_IS_TX: tx
  69. * @CDP_VDEV_PEER_PROTOCOL_IS_RX: rx
  70. *
  71. * whether tx or rx
  72. */
  73. enum vdev_peer_protocol_tx_rx {
  74. CDP_VDEV_PEER_PROTOCOL_IS_TX,
  75. CDP_VDEV_PEER_PROTOCOL_IS_RX
  76. };
  77. /******************************************************************************
  78. *
  79. * Control Interface (A Interface)
  80. *
  81. *****************************************************************************/
  82. struct cdp_cmn_ops {
  83. QDF_STATUS (*txrx_soc_attach_target)(ol_txrx_soc_handle soc);
  84. int (*txrx_pdev_attach_target)(ol_txrx_soc_handle soc, uint8_t pdev_id);
  85. QDF_STATUS (*txrx_vdev_attach)
  86. (struct cdp_soc_t *soc, uint8_t pdev_id, uint8_t *mac,
  87. uint8_t vdev_id, enum wlan_op_mode op_mode,
  88. enum wlan_op_subtype subtype);
  89. QDF_STATUS
  90. (*txrx_vdev_detach)(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
  91. ol_txrx_vdev_delete_cb callback,
  92. void *cb_context);
  93. QDF_STATUS (*txrx_pdev_attach)
  94. (ol_txrx_soc_handle soc, HTC_HANDLE htc_pdev,
  95. qdf_device_t osdev, uint8_t pdev_id);
  96. int (*txrx_pdev_post_attach)(struct cdp_soc_t *soc, uint8_t pdev_id);
  97. void
  98. (*txrx_pdev_pre_detach)(struct cdp_soc_t *soc, uint8_t pdev_id,
  99. int force);
  100. QDF_STATUS
  101. (*txrx_pdev_detach)(struct cdp_soc_t *psoc, uint8_t pdev_id,
  102. int force);
  103. /**
  104. * txrx_pdev_deinit() - Deinitialize pdev and dp ring memory
  105. * @soc: soc dp handle
  106. * @pdev_id: id of Dp pdev handle
  107. * @force: Force deinit or not
  108. *
  109. * Return: QDF_STATUS
  110. */
  111. QDF_STATUS
  112. (*txrx_pdev_deinit)(struct cdp_soc_t *soc, uint8_t pdev_id,
  113. int force);
  114. QDF_STATUS
  115. (*txrx_peer_create)
  116. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  117. uint8_t *peer_mac_addr);
  118. QDF_STATUS
  119. (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  120. uint8_t *peer_mac);
  121. QDF_STATUS
  122. (*txrx_cp_peer_del_response)
  123. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  124. uint8_t *peer_mac_addr);
  125. QDF_STATUS
  126. (*txrx_peer_teardown)
  127. (struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *peer_mac);
  128. int (*txrx_peer_add_ast)
  129. (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
  130. uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
  131. uint32_t flags);
  132. int (*txrx_peer_update_ast)
  133. (ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *peer_mac,
  134. uint8_t *mac_addr, uint32_t flags);
  135. bool (*txrx_peer_get_ast_info_by_soc)
  136. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  137. struct cdp_ast_entry_info *ast_entry_info);
  138. bool (*txrx_peer_get_ast_info_by_pdev)
  139. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  140. uint8_t pdev_id,
  141. struct cdp_ast_entry_info *ast_entry_info);
  142. QDF_STATUS (*txrx_peer_ast_delete_by_soc)
  143. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  144. txrx_ast_free_cb callback,
  145. void *cookie);
  146. QDF_STATUS (*txrx_peer_ast_delete_by_pdev)
  147. (ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
  148. uint8_t pdev_id,
  149. txrx_ast_free_cb callback,
  150. void *cookie);
  151. QDF_STATUS
  152. (*txrx_peer_delete)(struct cdp_soc_t *soc, uint8_t vdev_id,
  153. uint8_t *peer_mac, uint32_t bitmap);
  154. QDF_STATUS (*txrx_set_monitor_mode)(struct cdp_soc_t *soc,
  155. uint8_t vdev_id,
  156. uint8_t smart_monitor);
  157. void (*txrx_peer_delete_sync)(struct cdp_soc_t *soc, uint8_t vdev_id,
  158. uint8_t *peer_mac,
  159. QDF_STATUS(*delete_cb)(
  160. uint8_t vdev_id,
  161. uint32_t peerid_cnt,
  162. uint16_t *peerid_list),
  163. uint32_t bitmap);
  164. void (*txrx_peer_unmap_sync_cb_set)(struct cdp_soc_t *soc_hdl,
  165. uint8_t pdev_id,
  166. ol_txrx_peer_unmap_sync_cb
  167. peer_unmap_sync);
  168. QDF_STATUS
  169. (*txrx_get_peer_mac_from_peer_id)
  170. (struct cdp_soc_t *cdp_soc,
  171. uint32_t peer_id, uint8_t *peer_mac);
  172. void
  173. (*txrx_vdev_tx_lock)(struct cdp_soc_t *soc, uint8_t vdev_id);
  174. void
  175. (*txrx_vdev_tx_unlock)(struct cdp_soc_t *soc, uint8_t vdev_id);
  176. QDF_STATUS
  177. (*txrx_ath_getstats)(struct cdp_soc_t *soc, uint8_t id,
  178. struct cdp_dev_stats *stats, uint8_t type);
  179. QDF_STATUS
  180. (*txrx_set_gid_flag)(struct cdp_soc_t *soc, uint8_t pdev_id,
  181. u_int8_t *mem_status,
  182. u_int8_t *user_position);
  183. uint32_t (*txrx_fw_supported_enh_stats_version)(struct cdp_soc_t *soc,
  184. uint8_t pdev_id);
  185. QDF_STATUS
  186. (*txrx_if_mgmt_drain)(struct cdp_soc_t *soc, uint8_t pdev_id,
  187. int force);
  188. QDF_STATUS
  189. (*txrx_set_curchan)(struct cdp_soc_t *soc, uint8_t pdev_id,
  190. uint32_t chan_mhz);
  191. QDF_STATUS
  192. (*txrx_set_privacy_filters)
  193. (struct cdp_soc_t *soc, uint8_t vdev_id, void *filter,
  194. uint32_t num);
  195. uint32_t (*txrx_get_cfg)(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg);
  196. /********************************************************************
  197. * Data Interface (B Interface)
  198. ********************************************************************/
  199. QDF_STATUS
  200. (*txrx_vdev_register)(struct cdp_soc_t *soc, uint8_t vdev_id,
  201. ol_osif_vdev_handle osif_vdev,
  202. struct ol_txrx_ops *txrx_ops);
  203. int (*txrx_mgmt_send)(struct cdp_soc_t *soc, uint8_t vdev_id,
  204. qdf_nbuf_t tx_mgmt_frm, uint8_t type);
  205. int (*txrx_mgmt_send_ext)(struct cdp_soc_t *soc, uint8_t vdev_id,
  206. qdf_nbuf_t tx_mgmt_frm, uint8_t type,
  207. uint8_t use_6mbps, uint16_t chanfreq);
  208. /**
  209. * ol_txrx_mgmt_tx_cb - tx management delivery notification
  210. * callback function
  211. */
  212. QDF_STATUS
  213. (*txrx_mgmt_tx_cb_set)(struct cdp_soc_t *soc, uint8_t pdev_id,
  214. uint8_t type,
  215. ol_txrx_mgmt_tx_cb download_cb,
  216. ol_txrx_mgmt_tx_cb ota_ack_cb,
  217. void *ctxt);
  218. /**
  219. * ol_txrx_data_tx_cb - Function registered with the data path
  220. * that is called when tx frames marked as "no free" are
  221. * done being transmitted
  222. */
  223. void (*txrx_data_tx_cb_set)(struct cdp_soc_t *soc, uint8_t vdev_id,
  224. ol_txrx_data_tx_cb callback, void *ctxt);
  225. qdf_nbuf_t (*tx_send_exc)
  226. (ol_txrx_soc_handle soc, uint8_t vdev_id, qdf_nbuf_t msdu_list,
  227. struct cdp_tx_exception_metadata *tx_exc_metadata);
  228. /*******************************************************************
  229. * Statistics and Debugging Interface (C Interface)
  230. ********************************************************************/
  231. int (*txrx_aggr_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
  232. int max_subfrms_ampdu,
  233. int max_subfrms_amsdu);
  234. A_STATUS
  235. (*txrx_fw_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
  236. struct ol_txrx_stats_req *req,
  237. bool per_vdev, bool response_expected);
  238. int (*txrx_debug)(struct cdp_soc_t *soc, uint8_t vdev_id,
  239. int debug_specs);
  240. QDF_STATUS
  241. (*txrx_fw_stats_cfg)(struct cdp_soc_t *soc, uint8_t vdev_id,
  242. uint8_t cfg_stats_type, uint32_t cfg_val);
  243. void (*txrx_print_level_set)(unsigned level);
  244. /**
  245. * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
  246. * @soc: datapath soc handle
  247. * @vdev_id: vdev id
  248. *
  249. * Return: vdev mac address
  250. */
  251. uint8_t * (*txrx_get_vdev_mac_addr)(struct cdp_soc_t *soc,
  252. uint8_t vdev_id);
  253. /**
  254. * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
  255. * @soc: datapath soc handle
  256. * @vdev_id: vdev id
  257. *
  258. * Return: Handle to control pdev
  259. */
  260. struct cdp_cfg *(*txrx_get_ctrl_pdev_from_vdev)(struct cdp_soc_t *soc,
  261. uint8_t vdev_id);
  262. /**
  263. * txrx_get_mon_vdev_from_pdev() - Return monitor mode vdev
  264. * @soc: datapath soc handle
  265. * @pdev_id: pdev id
  266. *
  267. * Return: vdev_id
  268. */
  269. uint8_t (*txrx_get_mon_vdev_from_pdev)(struct cdp_soc_t *soc,
  270. uint8_t pdev_id);
  271. void (*txrx_soc_detach)(struct cdp_soc_t *soc);
  272. /**
  273. * txrx_soc_deinit() - Deinitialize dp soc and dp ring memory
  274. * @soc: Opaque Dp handle
  275. *
  276. * Return: None
  277. */
  278. void (*txrx_soc_deinit)(struct cdp_soc_t *soc);
  279. /**
  280. * txrx_soc_init() - Initialize dp soc and dp ring memory
  281. * @soc: Opaque Dp handle
  282. * @ctrl_psoc: Opaque CP handle
  283. * @hif_handle: Opaque HIF handle
  284. * @htc_handle: Opaque HTC handle
  285. *
  286. * Return: DP SOC handle on success, NULL on failure
  287. */
  288. void *(*txrx_soc_init)(struct cdp_soc_t *soc,
  289. struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  290. struct hif_opaque_softc *hif_handle,
  291. HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
  292. struct ol_if_ops *ol_ops, uint16_t device_id);
  293. QDF_STATUS (*txrx_pdev_init)(ol_txrx_soc_handle soc,
  294. HTC_HANDLE htc_handle,
  295. qdf_device_t qdf_osdev,
  296. uint8_t pdev_id);
  297. /**
  298. * txrx_tso_soc_attach() - TSO attach handler triggered during
  299. * dynamic tso activation
  300. * @soc: Opaque Dp handle
  301. *
  302. * Return: QDF status
  303. */
  304. QDF_STATUS (*txrx_tso_soc_attach)(struct cdp_soc_t *soc);
  305. /**
  306. * txrx_tso_soc_detach() - TSO detach handler triggered during
  307. * dynamic tso de-activation
  308. * @soc: Opaque Dp handle
  309. *
  310. * Return: QDF status
  311. */
  312. QDF_STATUS (*txrx_tso_soc_detach)(struct cdp_soc_t *soc);
  313. int (*addba_resp_tx_completion)(struct cdp_soc_t *cdp_soc,
  314. uint8_t *peer_mac,
  315. uint16_t vdev_id, uint8_t tid,
  316. int status);
  317. int (*addba_requestprocess)(struct cdp_soc_t *cdp_soc,
  318. uint8_t *peer_mac,
  319. uint16_t vdev_id,
  320. uint8_t dialogtoken,
  321. uint16_t tid, uint16_t batimeout,
  322. uint16_t buffersize,
  323. uint16_t startseqnum);
  324. QDF_STATUS
  325. (*addba_responsesetup)(struct cdp_soc_t *cdp_soc,
  326. uint8_t *peer_mac,
  327. uint16_t vdev_id, uint8_t tid,
  328. uint8_t *dialogtoken, uint16_t *statuscode,
  329. uint16_t *buffersize, uint16_t *batimeout);
  330. int (*delba_process)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  331. uint16_t vdev_id, int tid, uint16_t reasoncode);
  332. /**
  333. * delba_tx_completion() - Indicate delba tx status
  334. * @cdp_soc: soc handle
  335. * @peer_mac: Peer mac address
  336. * @vdev_id: vdev id
  337. * @tid: Tid number
  338. * @status: Tx completion status
  339. *
  340. * Return: 0 on Success, 1 on failure
  341. */
  342. int (*delba_tx_completion)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  343. uint16_t vdev_id,
  344. uint8_t tid, int status);
  345. QDF_STATUS
  346. (*set_addba_response)(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
  347. uint16_t vdev_id, uint8_t tid,
  348. uint16_t statuscode);
  349. QDF_STATUS
  350. (*set_vdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
  351. uint8_t vdev_id, uint8_t map_id);
  352. int (*txrx_get_total_per)(struct cdp_soc_t *soc, uint8_t pdev_id);
  353. void (*flush_cache_rx_queue)(void);
  354. QDF_STATUS (*set_pdev_dscp_tid_map)(struct cdp_soc_t *soc_handle,
  355. uint8_t pdev_id,
  356. uint8_t map_id,
  357. uint8_t tos, uint8_t tid);
  358. QDF_STATUS (*txrx_stats_request)(struct cdp_soc_t *soc_handle,
  359. uint8_t vdev_id,
  360. struct cdp_txrx_stats_req *req);
  361. QDF_STATUS (*display_stats)(struct cdp_soc_t *psoc, uint16_t value,
  362. enum qdf_stats_verbosity_level level);
  363. QDF_STATUS (*txrx_intr_attach)(struct cdp_soc_t *soc_handle);
  364. void (*txrx_intr_detach)(struct cdp_soc_t *soc_handle);
  365. QDF_STATUS (*set_pn_check)(struct cdp_soc_t *soc_handle,
  366. uint8_t vdev_id, uint8_t *peermac,
  367. enum cdp_sec_type sec_type,
  368. uint32_t *rx_pn);
  369. QDF_STATUS(*set_key_sec_type)(struct cdp_soc_t *soc_handle,
  370. uint8_t vdev_id, uint8_t *peermac,
  371. enum cdp_sec_type sec_type,
  372. bool is_unicast);
  373. QDF_STATUS (*update_config_parameters)(struct cdp_soc *psoc,
  374. struct cdp_config_params *params);
  375. void *(*get_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id);
  376. void (*set_dp_txrx_handle)(ol_txrx_soc_handle soc, uint8_t pdev_id,
  377. void *dp_hdl);
  378. void *(*get_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
  379. uint8_t vdev_id);
  380. QDF_STATUS (*set_vdev_dp_ext_txrx_handle)(struct cdp_soc_t *soc,
  381. uint8_t vdev_id,
  382. uint16_t size);
  383. void *(*get_soc_dp_txrx_handle)(struct cdp_soc *soc_handle);
  384. void (*set_soc_dp_txrx_handle)(struct cdp_soc *soc_handle,
  385. void *dp_txrx_handle);
  386. QDF_STATUS (*map_pdev_to_lmac)(ol_txrx_soc_handle soc, uint8_t pdev_id,
  387. uint32_t lmac_id);
  388. QDF_STATUS (*handle_mode_change)(ol_txrx_soc_handle soc,
  389. uint8_t pdev_id, uint32_t lmac_id);
  390. QDF_STATUS (*set_pdev_status_down)(struct cdp_soc_t *soc_handle,
  391. uint8_t pdev_id, bool is_pdev_down);
  392. QDF_STATUS (*txrx_peer_reset_ast)
  393. (ol_txrx_soc_handle soc, uint8_t *ast_macaddr,
  394. uint8_t *peer_macaddr, uint8_t vdev_id);
  395. QDF_STATUS (*txrx_peer_reset_ast_table)(ol_txrx_soc_handle soc,
  396. uint8_t vdev_id);
  397. void (*txrx_peer_flush_ast_table)(ol_txrx_soc_handle soc);
  398. void (*txrx_set_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
  399. uint8_t ac, uint32_t value);
  400. void (*txrx_get_ba_aging_timeout)(struct cdp_soc_t *soc_handle,
  401. uint8_t ac, uint32_t *value);
  402. QDF_STATUS (*txrx_peer_map_attach)(ol_txrx_soc_handle soc,
  403. uint32_t num_peers,
  404. uint32_t max_ast_index,
  405. bool peer_map_unmap_v2);
  406. QDF_STATUS (*set_soc_param)(ol_txrx_soc_handle soc,
  407. enum cdp_soc_param_t param,
  408. uint32_t value);
  409. ol_txrx_tx_fp tx_send;
  410. /**
  411. * txrx_get_os_rx_handles_from_vdev() - Return the RX stack delivery
  412. * function and osif vdev used to deliver packets to the stack.
  413. * @soc: datapath soc handle
  414. * @vdev_id: vdev id
  415. * @stack_fn: out pointer to the function that delivers RX packets to the stack
  416. * @osif_vdev: out pointer to the osif vdev to deliver RX packets to
  417. */
  418. void (*txrx_get_os_rx_handles_from_vdev)
  419. (ol_txrx_soc_handle soc,
  420. uint8_t vdev_id,
  421. ol_txrx_rx_fp *stack_fn,
  422. ol_osif_vdev_handle *osif_vdev);
  423. void (*set_rate_stats_ctx)(struct cdp_soc_t *soc,
  424. void *ctx);
  425. int (*txrx_classify_update)
  426. (struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t skb,
  427. enum txrx_direction, struct ol_txrx_nbuf_classify *nbuf_class);
  428. bool (*get_dp_capabilities)(struct cdp_soc_t *soc,
  429. enum cdp_capabilities dp_caps);
  430. void* (*get_rate_stats_ctx)(struct cdp_soc_t *soc);
  431. QDF_STATUS (*txrx_peer_flush_rate_stats)(struct cdp_soc_t *soc,
  432. uint8_t pdev_id,
  433. void *buf);
  434. void* (*txrx_peer_get_wlan_stats_ctx)(struct cdp_soc_t *soc,
  435. uint8_t vdev_id,
  436. uint8_t *mac_addr);
  437. QDF_STATUS (*txrx_flush_rate_stats_request)(struct cdp_soc_t *soc,
  438. uint8_t pdev_id);
  439. QDF_STATUS (*set_pdev_pcp_tid_map)(struct cdp_soc_t *soc,
  440. uint8_t pdev_id,
  441. uint8_t pcp, uint8_t tid);
  442. QDF_STATUS (*set_vdev_pcp_tid_map)(struct cdp_soc_t *soc,
  443. uint8_t vdev_id,
  444. uint8_t pcp, uint8_t tid);
  445. #ifdef QCA_MULTIPASS_SUPPORT
  446. QDF_STATUS (*set_vlan_groupkey)(struct cdp_soc_t *soc, uint8_t vdev_id,
  447. uint16_t vlan_id, uint16_t group_key);
  448. #endif
  449. uint16_t (*get_peer_mac_list)
  450. (ol_txrx_soc_handle soc, uint8_t vdev_id,
  451. u_int8_t newmac[][QDF_MAC_ADDR_SIZE], uint16_t mac_cnt);
  452. };
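/*
 * A minimal caller-side sketch of dispatching through cdp_cmn_ops. The
 * example_* name is hypothetical, and the soc->ops->cmn_drv_ops access path
 * assumes the cdp_ops aggregation defined later in this header; the real
 * convenience wrappers live in cdp_txrx_cmn.h.
 */
#if 0
static inline QDF_STATUS
example_cdp_vdev_attach(ol_txrx_soc_handle soc, uint8_t pdev_id,
                        uint8_t *mac, uint8_t vdev_id)
{
        /* Converged drivers may leave unused hooks NULL, so every dispatch
         * checks the ops table and the specific callback before calling.
         */
        if (!soc || !soc->ops || !soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_vdev_attach)
                return QDF_STATUS_E_FAILURE;

        return soc->ops->cmn_drv_ops->txrx_vdev_attach(soc, pdev_id, mac,
                                                       vdev_id,
                                                       wlan_op_mode_sta,
                                                       wlan_op_subtype_none);
}
#endif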
  453. struct cdp_ctrl_ops {
  454. int
  455. (*txrx_mempools_attach)(ol_txrx_soc_handle dp_soc);
  456. int
  457. (*txrx_update_filter_neighbour_peers)(
  458. struct cdp_soc_t *soc, uint8_t vdev_id,
  459. uint32_t cmd, uint8_t *macaddr);
  460. /* Is this similar to ol_txrx_peer_state_update() in MCL */
  461. /**
  462. * @brief Update the authorize peer object at association time
  463. * @details
  464. * For the host-based implementation of rate-control, it
  465. * updates the peer/node-related parameters within rate-control
  466. * context of the peer at association.
  467. *
  468. * @param soc_hdl - pointer to the soc object
  469. * @param vdev_id - id of the virtual device
  470. * @param peer_mac - mac address of the peer node
  471. * @param authorize - whether to authorize or unauthorize the peer
  472. *
  473. * @return QDF_STATUS
  474. */
  475. QDF_STATUS
  476. (*txrx_peer_authorize)(struct cdp_soc_t *soc_hdl,
  477. uint8_t vdev_id,
  478. uint8_t *peer_mac,
  479. u_int32_t authorize);
  480. void (*tx_flush_buffers)(struct cdp_soc_t *soc, uint8_t vdev_id);
  481. int (*txrx_is_target_ar900b)(struct cdp_soc_t *soc_hdl);
  482. QDF_STATUS
  483. (*txrx_set_vdev_param)(struct cdp_soc_t *soc, uint8_t vdev_id,
  484. enum cdp_vdev_param_type param,
  485. cdp_config_param_type val);
  486. /**
  487. * @brief Set the reo dest ring num of the radio
  488. * @details
  489. * Set the reo destination ring no on which we will receive
  490. * pkts for this radio.
  491. *
  492. * @param txrx_soc - soc handle
  493. * @param pdev_id - id of physical device
  494. * @param reo_dest_ring_num - value ranges between 1 - 4
  495. * @return QDF_STATUS
  496. */
  497. QDF_STATUS (*txrx_set_pdev_reo_dest)(
  498. struct cdp_soc_t *txrx_soc,
  499. uint8_t pdev_id,
  500. enum cdp_host_reo_dest_ring reo_dest_ring_num);
  501. /**
  502. * @brief Get the reo dest ring num of the radio
  503. * @details
  504. * Get the reo destination ring no on which we will receive
  505. * pkts for this radio.
  506. *
  507. * @param txrx_soc - soc handle
  508. * @param pdev_id - id of physical device
  509. * @return the reo destination ring number
  510. */
  511. enum cdp_host_reo_dest_ring (*txrx_get_pdev_reo_dest)(
  512. struct cdp_soc_t *txrx_soc,
  513. uint8_t pdev_id);
  514. int (*txrx_wdi_event_sub)(struct cdp_soc_t *soc, uint8_t pdev_id,
  515. wdi_event_subscribe *event_cb_sub,
  516. uint32_t event);
  517. int (*txrx_wdi_event_unsub)(struct cdp_soc_t *soc, uint8_t pdev_id,
  518. wdi_event_subscribe *event_cb_sub,
  519. uint32_t event);
  520. int (*txrx_get_sec_type)(ol_txrx_soc_handle soc, uint8_t vdev_id,
  521. uint8_t *peer_mac, uint8_t sec_idx);
  522. QDF_STATUS
  523. (*txrx_update_mgmt_txpow_vdev)(struct cdp_soc_t *soc,
  524. uint8_t vdev_id,
  525. uint8_t subtype, uint8_t tx_power);
  526. /**
  527. * txrx_set_pdev_param() - callback to set pdev parameter
  528. * @soc: opaque soc handle
  529. * @pdev_id: id of data path pdev handle
  530. * @type: pdev parameter type to set
  531. * @val: value of the parameter
  532. * Return: QDF_STATUS
  533. */
  534. QDF_STATUS (*txrx_set_pdev_param)(struct cdp_soc_t *soc,
  535. uint8_t pdev_id,
  536. enum cdp_pdev_param_type type,
  537. cdp_config_param_type val);
  538. QDF_STATUS (*txrx_get_pdev_param)(struct cdp_soc_t *soc,
  539. uint8_t pdev_id,
  540. enum cdp_pdev_param_type type,
  541. cdp_config_param_type *val);
  542. QDF_STATUS (*txrx_set_peer_param)(struct cdp_soc_t *soc,
  543. uint8_t vdev_id, uint8_t *peer_mac,
  544. enum cdp_peer_param_type param,
  545. cdp_config_param_type val);
  546. QDF_STATUS (*txrx_get_peer_param)(struct cdp_soc_t *soc,
  547. uint8_t vdev_id, uint8_t *peer_mac,
  548. enum cdp_peer_param_type param,
  549. cdp_config_param_type *val);
  550. void * (*txrx_get_pldev)(struct cdp_soc_t *soc, uint8_t pdev_id);
  551. #ifdef VDEV_PEER_PROTOCOL_COUNT
  552. void (*txrx_peer_protocol_cnt)(struct cdp_soc_t *soc,
  553. int8_t vdev_id,
  554. qdf_nbuf_t nbuf,
  555. bool is_egress,
  556. bool is_rx);
  557. #endif
  558. #ifdef ATH_SUPPORT_NAC_RSSI
  559. QDF_STATUS (*txrx_vdev_config_for_nac_rssi)(struct cdp_soc_t *cdp_soc,
  560. uint8_t vdev_id,
  561. enum cdp_nac_param_cmd cmd,
  562. char *bssid,
  563. char *client_macaddr,
  564. uint8_t chan_num);
  565. QDF_STATUS (*txrx_vdev_get_neighbour_rssi)(struct cdp_soc_t *cdp_soc,
  566. uint8_t vdev_id,
  567. char *macaddr,
  568. uint8_t *rssi);
  569. #endif
  570. QDF_STATUS
  571. (*txrx_record_mscs_params) (
  572. struct cdp_soc_t *soc, uint8_t *macaddr,
  573. uint8_t vdev_id,
  574. struct cdp_mscs_params *mscs_params,
  575. bool active);
  576. QDF_STATUS
  577. (*set_key)(struct cdp_soc_t *soc, uint8_t vdev_id, uint8_t *mac,
  578. bool is_unicast, uint32_t *key);
  579. QDF_STATUS (*txrx_get_vdev_param)(struct cdp_soc_t *soc,
  580. uint8_t vdev_id,
  581. enum cdp_vdev_param_type param,
  582. cdp_config_param_type *val);
  583. int (*enable_peer_based_pktlog)(struct cdp_soc_t *cdp_soc,
  584. uint8_t pdev_id,
  585. uint8_t *macaddr, uint8_t enb_dsb);
  586. QDF_STATUS
  587. (*calculate_delay_stats)(struct cdp_soc_t *cdp_soc,
  588. uint8_t vdev_id, qdf_nbuf_t nbuf);
  589. #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
  590. QDF_STATUS (*txrx_update_pdev_rx_protocol_tag)(
  591. struct cdp_soc_t *soc, uint8_t pdev_id,
  592. uint32_t protocol_mask, uint16_t protocol_type,
  593. uint16_t tag);
  594. #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
  595. void (*txrx_dump_pdev_rx_protocol_tag_stats)(
  596. struct cdp_soc_t *soc, uint8_t pdev_id,
  597. uint16_t protocol_type);
  598. #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
  599. #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
  600. #ifdef WLAN_SUPPORT_RX_FLOW_TAG
  601. QDF_STATUS (*txrx_set_rx_flow_tag)(
  602. struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
  603. struct cdp_rx_flow_info *flow_info);
  604. QDF_STATUS (*txrx_dump_rx_flow_tag_stats)(
  605. struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
  606. struct cdp_rx_flow_info *flow_info);
  607. #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
  608. #ifdef QCA_MULTIPASS_SUPPORT
  609. void (*txrx_peer_set_vlan_id)(struct cdp_soc_t *cdp_soc,
  610. uint8_t vdev_id, uint8_t *peer_mac,
  611. uint16_t vlan_id);
  612. #endif
  613. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
  614. QDF_STATUS (*txrx_update_peer_pkt_capture_params)(
  615. ol_txrx_soc_handle soc, uint8_t pdev_id,
  616. bool is_rx_pkt_cap_enable, uint8_t is_tx_pkt_cap_enable,
  617. uint8_t *peer_mac);
  618. #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
  619. QDF_STATUS
  620. (*txrx_set_psoc_param)(struct cdp_soc_t *soc,
  621. enum cdp_psoc_param_type param,
  622. cdp_config_param_type val);
  623. QDF_STATUS (*txrx_get_psoc_param)(ol_txrx_soc_handle soc,
  624. enum cdp_psoc_param_type type,
  625. cdp_config_param_type *val);
  626. #ifdef VDEV_PEER_PROTOCOL_COUNT
  627. /*
  628. * Enable per-peer protocol counters
  629. */
  630. void (*txrx_enable_peer_protocol_count)(struct cdp_soc_t *soc,
  631. int8_t vdev_id, bool enable);
  632. void (*txrx_set_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
  633. int8_t vdev_id, int mask);
  634. int (*txrx_is_peer_protocol_count_enabled)(struct cdp_soc_t *soc,
  635. int8_t vdev_id);
  636. int (*txrx_get_peer_protocol_drop_mask)(struct cdp_soc_t *soc,
  637. int8_t vdev_id);
  638. #endif
  639. };
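/*
 * A minimal sketch of using the control ops, here authorizing a peer once key
 * installation completes (see the txrx_peer_authorize comment above). The
 * helper name is hypothetical and the soc->ops->ctrl_ops path assumes the
 * cdp_ops aggregation defined later in this header.
 */
#if 0
static inline QDF_STATUS
example_authorize_peer(ol_txrx_soc_handle soc, uint8_t vdev_id,
                       uint8_t *peer_mac)
{
        if (!soc || !soc->ops || !soc->ops->ctrl_ops ||
            !soc->ops->ctrl_ops->txrx_peer_authorize)
                return QDF_STATUS_E_FAILURE;

        /* authorize = 1 moves the peer out of the "connected but not yet
         * authorized" state so its data frames are no longer filtered.
         */
        return soc->ops->ctrl_ops->txrx_peer_authorize(soc, vdev_id,
                                                       peer_mac, 1);
}
#endif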
  640. struct cdp_me_ops {
  641. void (*tx_me_alloc_descriptor)(struct cdp_soc_t *soc,
  642. uint8_t pdev_id);
  643. void (*tx_me_free_descriptor)(struct cdp_soc_t *soc, uint8_t pdev_id);
  644. uint16_t (*tx_me_convert_ucast)(struct cdp_soc_t *soc, uint8_t vdev_id,
  645. qdf_nbuf_t wbuf, u_int8_t newmac[][6],
  646. uint8_t newmaccnt);
  647. };
  648. struct cdp_mon_ops {
  649. QDF_STATUS (*txrx_reset_monitor_mode)
  650. (ol_txrx_soc_handle soc, uint8_t pdev_id, u_int8_t smart_monitor);
  651. QDF_STATUS (*txrx_deliver_tx_mgmt)
  652. (struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf);
  653. /* HK advance monitor filter support */
  654. QDF_STATUS (*txrx_set_advance_monitor_filter)
  655. (struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  656. struct cdp_monitor_filter *filter_val);
  657. /* Configure full monitor mode */
  658. QDF_STATUS
  659. (*config_full_mon_mode)(struct cdp_soc_t *soc, uint8_t val);
  660. };
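/*
 * A minimal sketch of driving the monitor ops: enable full monitor mode and
 * then (re)program monitor mode for a pdev. The helper name is hypothetical;
 * soc->ops->mon_ops assumes the cdp_ops aggregation defined later in this
 * header.
 */
#if 0
static inline QDF_STATUS
example_enable_full_monitor(ol_txrx_soc_handle soc, uint8_t pdev_id)
{
        QDF_STATUS status;

        if (!soc || !soc->ops || !soc->ops->mon_ops)
                return QDF_STATUS_E_FAILURE;

        if (soc->ops->mon_ops->config_full_mon_mode) {
                status = soc->ops->mon_ops->config_full_mon_mode(soc, 1);
                if (QDF_IS_STATUS_ERROR(status))
                        return status;
        }

        /* smart_monitor = 0: plain monitor mode rather than smart monitor */
        if (soc->ops->mon_ops->txrx_reset_monitor_mode)
                return soc->ops->mon_ops->txrx_reset_monitor_mode(soc,
                                                                  pdev_id, 0);

        return QDF_STATUS_E_NOSUPPORT;
}
#endif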
  661. struct cdp_host_stats_ops {
  662. int (*txrx_host_stats_get)(struct cdp_soc_t *soc, uint8_t vdev_id,
  663. struct ol_txrx_stats_req *req);
  664. QDF_STATUS (*txrx_host_stats_clr)(struct cdp_soc_t *soc,
  665. uint8_t vdev_id);
  666. QDF_STATUS
  667. (*txrx_host_ce_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  668. int (*txrx_stats_publish)(struct cdp_soc_t *soc, uint8_t pdev_id,
  669. struct cdp_stats_extd *buf);
  670. /**
  671. * @brief Enable enhanced stats functionality.
  672. *
  673. * @param soc - the soc handle
  674. * @param pdev_id - pdev_id of pdev
  675. * @return - QDF_STATUS
  676. */
  677. QDF_STATUS (*txrx_enable_enhanced_stats)(struct cdp_soc_t *soc,
  678. uint8_t pdev_id);
  679. /**
  680. * @brief Disable enhanced stats functionality.
  681. *
  682. * @param soc - the soc handle
  683. * @param pdev_id - pdev_id of pdev
  684. * @return - QDF_STATUS
  685. */
  686. QDF_STATUS (*txrx_disable_enhanced_stats)(struct cdp_soc_t *soc,
  687. uint8_t pdev_id);
  688. QDF_STATUS
  689. (*tx_print_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  690. QDF_STATUS
  691. (*tx_rst_tso_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  692. QDF_STATUS
  693. (*tx_print_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  694. QDF_STATUS
  695. (*tx_rst_sg_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  696. QDF_STATUS
  697. (*print_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  698. QDF_STATUS
  699. (*rst_rx_cksum_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  700. QDF_STATUS
  701. (*txrx_host_me_stats)(struct cdp_soc_t *soc, uint8_t vdev_id);
  702. QDF_STATUS
  703. (*txrx_per_peer_stats)(struct cdp_soc_t *soc, uint8_t *addr);
  704. int (*txrx_host_msdu_ttl_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  705. struct ol_txrx_stats_req *req);
  706. int (*ol_txrx_update_peer_stats)(struct cdp_soc_t *soc,
  707. uint8_t pdev_id,
  708. uint8_t *addr, void *stats,
  709. uint32_t last_tx_rate_mcs,
  710. uint32_t stats_id);
  711. QDF_STATUS
  712. (*get_fw_peer_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  713. uint8_t *addr,
  714. uint32_t cap, uint32_t copy_stats);
  715. QDF_STATUS
  716. (*get_htt_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  717. void *data,
  718. uint32_t data_len);
  719. QDF_STATUS
  720. (*txrx_update_pdev_stats)(struct cdp_soc_t *soc,
  721. uint8_t pdev_id, void *data,
  722. uint16_t stats_id);
  723. QDF_STATUS
  724. (*txrx_get_peer_stats_param)(struct cdp_soc_t *soc,
  725. uint8_t vdev_id,
  726. uint8_t *peer_mac,
  727. enum cdp_peer_stats_type type,
  728. cdp_peer_stats_param_t *buf);
  729. QDF_STATUS
  730. (*txrx_get_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  731. uint8_t *peer_mac,
  732. struct cdp_peer_stats *peer_stats);
  733. QDF_STATUS
  734. (*txrx_reset_peer_ald_stats)(struct cdp_soc_t *soc,
  735. uint8_t vdev_id,
  736. uint8_t *peer_mac);
  737. QDF_STATUS
  738. (*txrx_reset_peer_stats)(struct cdp_soc_t *soc,
  739. uint8_t vdev_id, uint8_t *peer_mac);
  740. int
  741. (*txrx_get_vdev_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  742. void *buf, bool is_aggregate);
  743. int
  744. (*txrx_process_wmi_host_vdev_stats)(ol_txrx_soc_handle soc,
  745. void *data, uint32_t len,
  746. uint32_t stats_id);
  747. int
  748. (*txrx_get_vdev_extd_stats)(struct cdp_soc_t *soc,
  749. uint8_t vdev_id,
  750. wmi_host_vdev_extd_stats *buffer);
  751. QDF_STATUS
  752. (*txrx_update_vdev_stats)(struct cdp_soc_t *soc,
  753. uint8_t vdev_id, void *buf,
  754. uint16_t stats_id);
  755. int
  756. (*txrx_get_radio_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  757. void *buf);
  758. QDF_STATUS
  759. (*txrx_get_pdev_stats)(struct cdp_soc_t *soc, uint8_t pdev_id,
  760. struct cdp_pdev_stats *buf);
  761. int
  762. (*txrx_get_ratekbps)(int preamb, int mcs,
  763. int htflag, int gintval);
  764. QDF_STATUS
  765. (*txrx_update_peer_stats)(struct cdp_soc_t *soc, uint8_t vdev_id,
  766. uint8_t *peer_mac, void *stats,
  767. uint32_t last_tx_rate_mcs,
  768. uint32_t stats_id);
  769. };
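/*
 * A minimal sketch of pulling per-peer stats through the host stats ops. The
 * example_* name is hypothetical; soc->ops->host_stats_ops assumes the
 * cdp_ops aggregation defined later in this header.
 */
#if 0
static inline QDF_STATUS
example_get_peer_stats(ol_txrx_soc_handle soc, uint8_t vdev_id,
                       uint8_t *peer_mac, struct cdp_peer_stats *stats)
{
        if (!soc || !soc->ops || !soc->ops->host_stats_ops ||
            !soc->ops->host_stats_ops->txrx_get_peer_stats || !stats)
                return QDF_STATUS_E_INVAL;

        /* The callee fills the caller-provided cdp_peer_stats buffer. */
        return soc->ops->host_stats_ops->txrx_get_peer_stats(soc, vdev_id,
                                                             peer_mac, stats);
}
#endif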
  770. struct cdp_wds_ops {
  771. QDF_STATUS
  772. (*txrx_set_wds_rx_policy)(struct cdp_soc_t *soc, uint8_t vdev_id,
  773. u_int32_t val);
  774. QDF_STATUS
  775. (*txrx_wds_peer_tx_policy_update)(struct cdp_soc_t *soc,
  776. uint8_t vdev_id, uint8_t *peer_mac,
  777. int wds_tx_ucast, int wds_tx_mcast);
  778. int (*vdev_set_wds)(struct cdp_soc_t *soc, uint8_t vdev_id,
  779. uint32_t val);
  780. };
  781. struct cdp_raw_ops {
  782. int (*txrx_get_nwifi_mode)(struct cdp_soc_t *soc, uint8_t vdev_id);
  783. QDF_STATUS
  784. (*rsim_get_astentry)(struct cdp_soc_t *soc, uint8_t vdev_id,
  785. qdf_nbuf_t *pnbuf, struct cdp_raw_ast *raw_ast);
  786. };
  787. #ifdef PEER_FLOW_CONTROL
  788. struct cdp_pflow_ops {
  789. uint32_t (*pflow_update_pdev_params)(struct cdp_soc_t *soc,
  790. uint8_t pdev_id,
  791. enum _dp_param_t,
  792. uint32_t, void *);
  793. };
  794. #endif /* PEER_FLOW_CONTROL */
  795. #define LRO_IPV4_SEED_ARR_SZ 5
  796. #define LRO_IPV6_SEED_ARR_SZ 11
  797. /**
  798. * struct cdp_lro_hash_config - set rx_offld(LRO/GRO) init parameters
  799. * @lro_enable: indicates whether rx_offld is enabled
  800. * @tcp_flag: If the TCP flags from the packet do not match
  801. * the values in this field after masking with TCP flags mask
  802. * below, packet is not rx_offld eligible
  803. * @tcp_flag_mask: field for comparing the TCP values provided
  804. * above with the TCP flags field in the received packet
  805. * @toeplitz_hash_ipv4: contains seed needed to compute the flow id
  806. * 5-tuple toeplitz hash for ipv4 packets
  807. * @toeplitz_hash_ipv6: contains seed needed to compute the flow id
  808. * 5-tuple toeplitz hash for ipv6 packets
  809. */
  810. struct cdp_lro_hash_config {
  811. uint32_t lro_enable;
  812. uint32_t tcp_flag:9,
  813. tcp_flag_mask:9;
  814. uint32_t toeplitz_hash_ipv4[LRO_IPV4_SEED_ARR_SZ];
  815. uint32_t toeplitz_hash_ipv6[LRO_IPV6_SEED_ARR_SZ];
  816. };
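/*
 * A minimal sketch of populating cdp_lro_hash_config before handing it to the
 * ol_if_ops lro_hash_config callback below. The numeric TCP flag values and
 * the caller-supplied seed source are illustrative assumptions, not the
 * values the driver actually programs.
 */
#if 0
static inline void
example_fill_lro_hash(struct cdp_lro_hash_config *cfg,
                      const uint32_t *v4_seed, const uint32_t *v6_seed)
{
        qdf_mem_zero(cfg, sizeof(*cfg));

        cfg->lro_enable = 1;
        cfg->tcp_flag = 0x010;          /* match packets with only ACK set */
        cfg->tcp_flag_mask = 0x1ff;     /* compare all nine TCP flag bits */

        /* Toeplitz seeds for the 5-tuple flow hash, supplied by the caller */
        qdf_mem_copy(cfg->toeplitz_hash_ipv4, v4_seed,
                     sizeof(cfg->toeplitz_hash_ipv4));
        qdf_mem_copy(cfg->toeplitz_hash_ipv6, v6_seed,
                     sizeof(cfg->toeplitz_hash_ipv6));
}
#endif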
  817. struct ol_if_ops {
  818. void
  819. (*peer_set_default_routing)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  820. uint8_t pdev_id, uint8_t *peer_macaddr,
  821. uint8_t vdev_id,
  822. bool hash_based, uint8_t ring_num);
  823. QDF_STATUS
  824. (*peer_rx_reorder_queue_setup)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  825. uint8_t pdev_id,
  826. uint8_t vdev_id, uint8_t *peer_mac,
  827. qdf_dma_addr_t hw_qdesc, int tid,
  828. uint16_t queue_num,
  829. uint8_t ba_window_size_valid,
  830. uint16_t ba_window_size);
  831. QDF_STATUS
  832. (*peer_rx_reorder_queue_remove)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  833. uint8_t pdev_id,
  834. uint8_t vdev_id, uint8_t *peer_macaddr,
  835. uint32_t tid_mask);
  836. int (*peer_unref_delete)(struct cdp_ctrl_objmgr_psoc *psoc,
  837. uint8_t pdev_id,
  838. uint8_t *peer_mac,
  839. uint8_t *vdev_mac, enum wlan_op_mode opmode);
  840. bool (*is_hw_dbs_2x2_capable)(struct wlan_objmgr_psoc *psoc);
  841. int (*peer_add_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  842. uint8_t vdev_id,
  843. uint8_t *peer_macaddr,
  844. uint16_t peer_id,
  845. const uint8_t *dest_macaddr,
  846. uint8_t *next_node_mac,
  847. uint32_t flags,
  848. uint8_t type);
  849. int (*peer_update_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  850. uint8_t vdev_id,
  851. uint8_t *dest_macaddr,
  852. uint8_t *peer_macaddr,
  853. uint32_t flags);
  854. void (*peer_del_wds_entry)(struct cdp_ctrl_objmgr_psoc *soc,
  855. uint8_t vdev_id,
  856. uint8_t *wds_macaddr,
  857. uint8_t type,
  858. uint8_t delete_in_fw);
  859. QDF_STATUS
  860. (*lro_hash_config)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t pdev_id,
  861. struct cdp_lro_hash_config *rx_offld_hash);
  862. void (*update_dp_stats)(void *soc, void *stats, uint16_t id,
  863. uint8_t type);
  864. #ifdef FEATURE_NAC_RSSI
  865. uint8_t (*rx_invalid_peer)(struct cdp_ctrl_objmgr_psoc *soc,
  866. uint8_t pdev_id, void *msg);
  867. #else
  868. uint8_t (*rx_invalid_peer)(uint8_t vdev_id, void *wh);
  869. #endif
  870. int (*peer_map_event)(struct cdp_ctrl_objmgr_psoc *psoc,
  871. uint16_t peer_id, uint16_t hw_peer_id,
  872. uint8_t vdev_id, uint8_t *peer_mac_addr,
  873. enum cdp_txrx_ast_entry_type peer_type,
  874. uint32_t tx_ast_hashidx);
  875. int (*peer_unmap_event)(struct cdp_ctrl_objmgr_psoc *psoc,
  876. uint16_t peer_id,
  877. uint8_t vdev_id);
  878. int (*get_dp_cfg_param)(struct cdp_ctrl_objmgr_psoc *psoc,
  879. enum cdp_cfg_param_type param_num);
  880. void (*rx_mic_error)(struct cdp_ctrl_objmgr_psoc *psoc,
  881. uint8_t pdev_id,
  882. struct cdp_rx_mic_err_info *info);
  883. bool (*rx_frag_tkip_demic)(struct cdp_ctrl_objmgr_psoc *psoc,
  884. uint8_t vdev_id, uint8_t *peer_mac_addr,
  885. qdf_nbuf_t nbuf,
  886. uint16_t hdr_space);
  887. uint8_t (*freq_to_channel)(struct cdp_ctrl_objmgr_psoc *psoc,
  888. uint8_t pdev_id, uint16_t freq);
  889. uint8_t (*freq_to_band)(struct cdp_ctrl_objmgr_psoc *psoc,
  890. uint8_t pdev_id, uint16_t freq);
  891. #ifdef ATH_SUPPORT_NAC_RSSI
  892. int (*config_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
  893. uint8_t pdev_id,
  894. u_int8_t vdev_id,
  895. enum cdp_nac_param_cmd cmd, char *bssid,
  896. char *client_macaddr, uint8_t chan_num);
  897. int
  898. (*config_bssid_in_fw_for_nac_rssi)(struct cdp_ctrl_objmgr_psoc *psoc,
  899. uint8_t pdev_id, u_int8_t vdev_id,
  900. enum cdp_nac_param_cmd cmd,
  901. char *bssid, char *client_mac);
  902. #endif
  903. int (*peer_sta_kickout)(struct cdp_ctrl_objmgr_psoc *psoc,
  904. uint16_t pdev_id, uint8_t *peer_macaddr);
  905. /**
  906. * send_delba() - Send delba to peer
  907. * @psoc: Objmgr soc handle
  908. * @vdev_id: dp vdev id
  909. * @peer_macaddr: Peer mac addr
  910. * @tid: Tid number
  911. * @reason_code: reason code sent in the delba
  912. * Return: 0 for success, non-zero for failure
  913. */
  914. int (*send_delba)(struct cdp_ctrl_objmgr_psoc *psoc, uint8_t vdev_id,
  915. uint8_t *peer_macaddr, uint8_t tid,
  916. uint8_t reason_code);
  917. int
  918. (*peer_delete_multiple_wds_entries)(struct cdp_ctrl_objmgr_psoc *psoc,
  919. uint8_t vdev_id,
  920. uint8_t *dest_macaddr,
  921. uint8_t *peer_macaddr,
  922. uint32_t flags);
  923. int
  924. (*pdev_update_lmac_n_target_pdev_id)(struct cdp_ctrl_objmgr_psoc *psoc,
  925. uint8_t *pdev_id,
  926. uint8_t *lmac_id,
  927. uint8_t *target_pdev_id);
  928. bool (*is_roam_inprogress)(uint32_t vdev_id);
  929. enum QDF_GLOBAL_MODE (*get_con_mode)(void);
  930. #ifdef QCA_PEER_MULTIQ_SUPPORT
  931. int (*peer_ast_flowid_map)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle,
  932. uint16_t peer_id, uint8_t vdev_id, uint8_t *peer_mac_addr);
  933. #endif
  934. int (*get_soc_nss_cfg)(struct cdp_ctrl_objmgr_psoc *ol_soc_handle);
  935. char *(*get_device_name)(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
  936. uint8_t pdev_id);
  937. QDF_STATUS(*nss_stats_clr)(struct cdp_ctrl_objmgr_psoc *psoc,
  938. uint8_t vdev_id);
  939. /* TODO: Add any other control path calls required to OL_IF/WMA layer */
  940. };
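/*
 * A minimal sketch of how the control path supplies this table: stub
 * callbacks are collected into an ol_if_ops instance and handed to the DP
 * layer at soc init (see txrx_soc_init above, which takes an ol_if_ops
 * pointer). The example_* names are hypothetical.
 */
#if 0
static int example_peer_unmap_event(struct cdp_ctrl_objmgr_psoc *psoc,
                                    uint16_t peer_id, uint8_t vdev_id)
{
        /* Release the control-path reference taken at peer_map time. */
        return 0;
}

static struct ol_if_ops example_ol_if_ops = {
        .peer_unmap_event = example_peer_unmap_event,
        /* remaining callbacks left NULL; the DP treats them as optional */
};
#endif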
  941. #ifdef DP_PEER_EXTENDED_API
  942. /**
  943. * struct cdp_misc_ops - mcl ops not classified
  944. * @set_ibss_vdev_heart_beat_timer: Update ibss vdev heart beat timer
  945. * @set_wmm_param: set wmm parameters
  946. * @bad_peer_txctl_set_setting: configure bad peer tx limit setting
  947. * @bad_peer_txctl_update_threshold: configure bad peer tx threshold limit
  948. * @hl_tdls_flag_reset: reset tdls flag for vdev
  949. * @tx_non_std: Allow the control-path SW to send data frames
  950. * @get_vdev_id: get vdev id
  951. * @set_wisa_mode: set wisa mode for a vdev
  952. * @txrx_data_stall_cb_register: register data stall callback
  953. * @txrx_data_stall_cb_deregister: deregister data stall callback
  954. * @txrx_post_data_stall_event: post data stall event
  955. * @runtime_suspend: ensure TXRX is ready to runtime suspend
  956. * @runtime_resume: ensure TXRX is ready to runtime resume
  957. * @get_opmode: get operation mode of vdev
  958. * @mark_first_wakeup_packet: set flag to indicate that fw is compatible for
  959. *	marking the first packet after wow wakeup
  960. * @update_mac_id: update mac_id for vdev
  961. * @flush_rx_frames: flush rx frames on the queue
  962. * @get_intra_bss_fwd_pkts_count: get the total tx and rx packets that
  963. *	have been forwarded from the txrx layer
  964. *	without going to upper layers
  965. * @pkt_log_init: handler to initialize packet log
  966. * @pkt_log_con_service: handler to connect packet log service
  967. * @get_num_rx_contexts: handler to get number of RX contexts
  968. * @register_pktdump_cb: register callback for different pktlog
  969. * @unregister_pktdump_cb: unregister callback for different pktlog
  970. * @pdev_reset_driver_del_ack: reset driver delayed ack enabled flag
  971. * @vdev_set_driver_del_ack_enable: set driver delayed ack enabled flag
  972. *
  973. * Function pointers for miscellaneous soc/pdev/vdev related operations.
  974. */
  975. struct cdp_misc_ops {
  976. uint16_t (*set_ibss_vdev_heart_beat_timer)(struct cdp_soc_t *soc_hdl,
  977. uint8_t vdev_id,
  978. uint16_t timer_value_sec);
  979. void (*set_wmm_param)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  980. struct ol_tx_wmm_param_t wmm_param);
  981. void (*bad_peer_txctl_set_setting)(struct cdp_soc_t *soc_hdl,
  982. uint8_t pdev_id, int enable,
  983. int period, int txq_limit);
  984. void (*bad_peer_txctl_update_threshold)(struct cdp_soc_t *soc_hdl,
  985. uint8_t pdev_id,
  986. int level, int tput_thresh,
  987. int tx_limit);
  988. void (*hl_tdls_flag_reset)(struct cdp_soc_t *soc_hdl,
  989. uint8_t vdev_id, bool flag);
  990. qdf_nbuf_t (*tx_non_std)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  991. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
  992. uint16_t (*get_vdev_id)(struct cdp_vdev *vdev);
  993. uint32_t (*get_tx_ack_stats)(struct cdp_soc_t *soc_hdl,
  994. uint8_t vdev_id);
  995. QDF_STATUS (*set_wisa_mode)(struct cdp_soc_t *soc_hdl,
  996. uint8_t vdev_id, bool enable);
  997. QDF_STATUS (*txrx_data_stall_cb_register)(struct cdp_soc_t *soc_hdl,
  998. uint8_t pdev_id,
  999. data_stall_detect_cb cb);
  1000. QDF_STATUS (*txrx_data_stall_cb_deregister)(struct cdp_soc_t *soc_hdl,
  1001. uint8_t pdev_id,
  1002. data_stall_detect_cb cb);
  1003. void (*txrx_post_data_stall_event)(
  1004. struct cdp_soc_t *soc_hdl,
  1005. enum data_stall_log_event_indicator indicator,
  1006. enum data_stall_log_event_type data_stall_type,
  1007. uint32_t pdev_id, uint32_t vdev_id_bitmap,
  1008. enum data_stall_log_recovery_type recovery_type);
  1009. QDF_STATUS (*runtime_suspend)(struct cdp_soc_t *soc_hdl,
  1010. uint8_t pdev_id);
  1011. QDF_STATUS (*runtime_resume)(struct cdp_soc_t *soc_hdl,
  1012. uint8_t pdev_id);
  1013. int (*get_opmode)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1014. void (*mark_first_wakeup_packet)(struct cdp_soc_t *soc_hdl,
  1015. uint8_t pdev_id, uint8_t value);
  1016. void (*update_mac_id)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1017. uint8_t mac_id);
  1018. void (*flush_rx_frames)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1019. void *peer, bool drop);
  1020. A_STATUS(*get_intra_bss_fwd_pkts_count)(struct cdp_soc_t *soc_hdl,
  1021. uint8_t vdev_id,
  1022. uint64_t *fwd_tx_packets,
  1023. uint64_t *fwd_rx_packets);
  1024. void (*pkt_log_init)(struct cdp_soc_t *soc_hdl, uint8_t pdev,
  1025. void *scn);
  1026. void (*pkt_log_con_service)(struct cdp_soc_t *soc_hdl,
  1027. uint8_t pdev_id, void *scn);
  1028. int (*get_num_rx_contexts)(struct cdp_soc_t *soc_hdl);
  1029. void (*register_pktdump_cb)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1030. ol_txrx_pktdump_cb tx_cb,
  1031. ol_txrx_pktdump_cb rx_cb);
  1032. void (*unregister_pktdump_cb)(struct cdp_soc_t *soc_hdl,
  1033. uint8_t pdev_id);
  1034. void (*pdev_reset_driver_del_ack)(struct cdp_soc_t *soc_hdl,
  1035. uint8_t pdev_id);
  1036. void (*vdev_set_driver_del_ack_enable)(struct cdp_soc_t *soc_hdl,
  1037. uint8_t vdev_id,
  1038. unsigned long rx_packets,
  1039. uint32_t time_in_ms,
  1040. uint32_t high_th,
  1041. uint32_t low_th);
  1042. void (*vdev_set_bundle_require_flag)(uint8_t vdev_id,
  1043. unsigned long tx_bytes,
  1044. uint32_t time_in_ms,
  1045. uint32_t high_th,
  1046. uint32_t low_th);
  1047. void (*pdev_reset_bundle_require_flag)(struct cdp_soc_t *soc_hdl,
  1048. uint8_t pdev_id);
  1049. QDF_STATUS (*txrx_ext_stats_request)(struct cdp_soc_t *soc_hdl,
  1050. uint8_t pdev_id,
  1051. struct cdp_txrx_ext_stats *req);
  1052. QDF_STATUS (*request_rx_hw_stats)(struct cdp_soc_t *soc_hdl,
  1053. uint8_t vdev_id);
  1054. };
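/*
 * A minimal sketch of gating bus runtime suspend on the DP runtime_suspend
 * hook from cdp_misc_ops. The helper name is hypothetical; soc->ops->misc_ops
 * assumes the cdp_ops aggregation defined later in this header.
 */
#if 0
static inline QDF_STATUS
example_runtime_suspend(ol_txrx_soc_handle soc, uint8_t pdev_id)
{
        if (!soc || !soc->ops || !soc->ops->misc_ops ||
            !soc->ops->misc_ops->runtime_suspend)
                return QDF_STATUS_SUCCESS;      /* nothing to veto suspend */

        /* A non-success return means TX is still draining; the caller should
         * abort runtime suspend and retry later.
         */
        return soc->ops->misc_ops->runtime_suspend(soc, pdev_id);
}
#endif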
  1055. /**
  1056. * struct cdp_ocb_ops - mcl ocb ops
  1057. * @set_ocb_chan_info: set OCB channel info
  1058. * @get_ocb_chan_info: get OCB channel info
  1059. *
  1060. * Function pointers for operations related to OCB.
  1061. */
  1062. struct cdp_ocb_ops {
  1063. void (*set_ocb_chan_info)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  1064. struct ol_txrx_ocb_set_chan ocb_set_chan);
  1065. struct ol_txrx_ocb_chan_info *(*get_ocb_chan_info)(
  1066. struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
  1067. };
  1068. /**
  1069. * struct cdp_peer_ops - mcl peer related ops
  1070. * @register_peer:
  1071. * @clear_peer:
  1072. * @find_peer_exist:
  1073. * @find_peer_exist_on_vdev:
  1074. * @find_peer_exist_on_other_vdev:
  1075. * @peer_state_update:
  1076. * @get_vdevid:
  1077. * @register_ocb_peer:
  1078. * @peer_get_peer_mac_addr:
  1079. * @get_peer_state:
  1080. * @update_ibss_add_peer_num_of_vdev:
  1081. * @copy_mac_addr_raw:
  1082. * @add_last_real_peer:
  1083. * @is_vdev_restore_last_peer:
  1084. * @update_last_real_peer:
  1085. */
  1086. struct cdp_peer_ops {
  1087. QDF_STATUS (*register_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1088. struct ol_txrx_desc_type *sta_desc);
  1089. QDF_STATUS (*clear_peer)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
  1090. struct qdf_mac_addr peer_addr);
  1091. bool (*find_peer_exist)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1092. uint8_t *peer_addr);
  1093. bool (*find_peer_exist_on_vdev)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1094. uint8_t *peer_addr);
  1095. bool (*find_peer_exist_on_other_vdev)(struct cdp_soc_t *soc,
  1096. uint8_t vdev_id,
  1097. uint8_t *peer_addr,
  1098. uint16_t max_bssid);
  1099. QDF_STATUS (*peer_state_update)(struct cdp_soc_t *soc,
  1100. uint8_t *peer_addr,
  1101. enum ol_txrx_peer_state state);
  1102. QDF_STATUS (*get_vdevid)(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
  1103. uint8_t *vdev_id);
  1104. struct cdp_vdev * (*get_vdev_by_peer_addr)(struct cdp_pdev *pdev,
  1105. struct qdf_mac_addr peer_addr);
  1106. QDF_STATUS (*register_ocb_peer)(uint8_t *mac_addr);
  1107. uint8_t * (*peer_get_peer_mac_addr)(void *peer);
  1108. int (*get_peer_state)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1109. uint8_t *peer_mac);
  1110. struct cdp_vdev * (*get_vdev_for_peer)(void *peer);
  1111. int16_t (*update_ibss_add_peer_num_of_vdev)(struct cdp_soc_t *soc,
  1112. uint8_t vdev_id,
  1113. int16_t peer_num_delta);
  1114. void (*remove_peers_for_vdev)(struct cdp_vdev *vdev,
  1115. ol_txrx_vdev_peer_remove_cb callback,
  1116. void *callback_context, bool remove_last_peer);
  1117. void (*remove_peers_for_vdev_no_lock)(struct cdp_vdev *vdev,
  1118. ol_txrx_vdev_peer_remove_cb callback,
  1119. void *callback_context);
  1120. void (*copy_mac_addr_raw)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1121. uint8_t *bss_addr);
  1122. void (*add_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1123. uint8_t vdev_id);
  1124. bool (*is_vdev_restore_last_peer)(struct cdp_soc_t *soc,
  1125. uint8_t vdev_id,
  1126. uint8_t *peer_mac);
  1127. void (*update_last_real_peer)(struct cdp_soc_t *soc, uint8_t pdev_id,
  1128. uint8_t vdev_id, bool restore_last_peer);
  1129. void (*peer_detach_force_delete)(struct cdp_soc_t *soc_hdl,
  1130. uint8_t vdev_id, uint8_t *peer_addr);
  1131. void (*set_tdls_offchan_enabled)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1132. uint8_t *peer_mac, bool val);
  1133. void (*set_peer_as_tdls_peer)(struct cdp_soc_t *soc, uint8_t vdev_id,
  1134. uint8_t *peer_mac, bool val);
  1135. };
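/*
 * A minimal sketch of the MCL peer state flow driven through cdp_peer_ops:
 * after association the peer is connected, and after the key handshake it is
 * moved to the authorized state. The OL_TXRX_PEER_STATE_AUTH value is assumed
 * from the MCL peer state enum; the helper name is hypothetical, and
 * soc->ops->peer_ops assumes the cdp_ops aggregation defined later.
 */
#if 0
static inline QDF_STATUS
example_peer_authorized(ol_txrx_soc_handle soc, uint8_t *peer_mac)
{
        if (!soc || !soc->ops || !soc->ops->peer_ops ||
            !soc->ops->peer_ops->peer_state_update)
                return QDF_STATUS_E_FAILURE;

        return soc->ops->peer_ops->peer_state_update(soc, peer_mac,
                                                     OL_TXRX_PEER_STATE_AUTH);
}
#endif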
  1136. /**
  1137. * struct cdp_mob_stats_ops - mcl mob stats ops
  1138. * @clear_stats: handler to clear ol txrx stats
  1139. * @stats: handler to update ol txrx stats
  1140. */
  1141. struct cdp_mob_stats_ops {
  1142. QDF_STATUS(*clear_stats)(struct cdp_soc_t *soc_hdl,
  1143. uint8_t pdev_id, uint8_t bitmap);
  1144. int (*stats)(uint8_t vdev_id, char *buffer, unsigned buf_len);
  1145. };
  1146. /**
  1147. * struct cdp_pmf_ops - mcl protected management frame ops
  1148. * @get_pn_info: handler to get pn info from peer
  1149. *
  1150. * Function pointers for pmf related operations.
  1151. */
  1152. struct cdp_pmf_ops {
  1153. void (*get_pn_info)(struct cdp_soc_t *soc, uint8_t *peer_mac,
  1154. uint8_t vdev_id, uint8_t **last_pn_valid,
  1155. uint64_t **last_pn, uint32_t **rmf_pn_replays);
  1156. };
  1157. #endif
  1158. #ifdef DP_FLOW_CTL
  1159. /**
  1160. * struct cdp_cfg_ops - mcl configuration ops
  1161. * @set_cfg_rx_fwd_disabled: set rx_fwd_disabled flag
  1162. * @set_cfg_packet_log_enabled: set is_packet_log_enabled flag
  1163. * @cfg_attach: hardcode the configuration parameters
  1164. * @vdev_rx_set_intrabss_fwd: set disable_intrabss_fwd flag
  1165. * @is_rx_fwd_disabled: get the rx_fwd_disabled flag,
  1166. * 1 enabled, 0 disabled.
  1167. * @tx_set_is_mgmt_over_wmi_enabled: set is_mgmt_over_wmi_enabled flag to
  1168. * indicate that mgmt over wmi is enabled
  1169. * or not,
  1170. * 1 for enabled, 0 for disabled
  1171. * @is_high_latency: get device is high or low latency device,
  1172. * 1 high latency bus, 0 low latency bus
  1173. * @set_flow_control_parameters: set flow control parameters
  1174. * @set_flow_steering: set flow_steering_enabled flag
  1175. * @set_ptp_rx_opt_enabled: set is_ptp_rx_opt_enabled flag
  1176. * @set_new_htt_msg_format: set new_htt_msg_format flag
  1177. * @set_peer_unmap_conf_support: set enable_peer_unmap_conf_support flag
  1178. * @get_peer_unmap_conf_support: get enable_peer_unmap_conf_support flag
  1179. * @set_tx_compl_tsf64: set enable_tx_compl_tsf64 flag,
  1180. * 1 enabled, 0 disabled.
  1181. * @get_tx_compl_tsf64: get enable_tx_compl_tsf64 flag,
  1182. * 1 enabled, 0 disabled.
  1183. */
  1184. struct cdp_cfg_ops {
  1185. void (*set_cfg_rx_fwd_disabled)(struct cdp_cfg *cfg_pdev,
  1186. uint8_t disable_rx_fwd);
  1187. void (*set_cfg_packet_log_enabled)(struct cdp_cfg *cfg_pdev,
  1188. uint8_t val);
  1189. struct cdp_cfg * (*cfg_attach)(qdf_device_t osdev, void *cfg_param);
  1190. void (*vdev_rx_set_intrabss_fwd)(struct cdp_soc_t *soc_hdl,
  1191. uint8_t vdev_id, bool val);
  1192. uint8_t (*is_rx_fwd_disabled)(struct cdp_vdev *vdev);
  1193. void (*tx_set_is_mgmt_over_wmi_enabled)(uint8_t value);
  1194. int (*is_high_latency)(struct cdp_cfg *cfg_pdev);
  1195. void (*set_flow_control_parameters)(struct cdp_cfg *cfg_pdev,
  1196. void *param);
  1197. void (*set_flow_steering)(struct cdp_cfg *cfg_pdev, uint8_t val);
  1198. void (*set_ptp_rx_opt_enabled)(struct cdp_cfg *cfg_pdev, uint8_t val);
  1199. void (*set_new_htt_msg_format)(uint8_t val);
  1200. void (*set_peer_unmap_conf_support)(bool val);
  1201. bool (*get_peer_unmap_conf_support)(void);
  1202. void (*set_tx_compl_tsf64)(bool val);
  1203. bool (*get_tx_compl_tsf64)(void);
  1204. };
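/*
 * A minimal sketch of typical cfg_ops usage at attach time: create the cfg
 * context and program one of the flags documented above. The helper name and
 * the soc->ops->cfg_ops path are assumptions.
 */
#if 0
static inline struct cdp_cfg *
example_cfg_attach(ol_txrx_soc_handle soc, qdf_device_t osdev, void *cfg_param)
{
        struct cdp_cfg *cfg_pdev;

        if (!soc || !soc->ops || !soc->ops->cfg_ops ||
            !soc->ops->cfg_ops->cfg_attach)
                return NULL;

        cfg_pdev = soc->ops->cfg_ops->cfg_attach(osdev, cfg_param);
        if (!cfg_pdev)
                return NULL;

        /* 1 = disable rx forwarding in the DP and let the OS bridge instead */
        if (soc->ops->cfg_ops->set_cfg_rx_fwd_disabled)
                soc->ops->cfg_ops->set_cfg_rx_fwd_disabled(cfg_pdev, 1);

        return cfg_pdev;
}
#endif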
  1205. /**
  1206. * struct cdp_flowctl_ops - mcl flow control
  1207. * @flow_pool_map_handler: handler to map flow_id and pool descriptors
  1208. * @flow_pool_unmap_handler: handler to unmap flow_id and pool descriptors
  1209. * @register_pause_cb: handler to register tx pause callback
  1210. * @set_desc_global_pool_size: handler to set global pool size
  1211. * @dump_flow_pool_info: handler to dump global and flow pool info
  1212. * @tx_desc_thresh_reached: handler to set tx desc threshold
  1213. *
  1214. * Function pointers for operations related to flow control
  1215. */
  1216. struct cdp_flowctl_ops {
  1217. QDF_STATUS (*flow_pool_map_handler)(struct cdp_soc_t *soc,
  1218. uint8_t pdev_id,
  1219. uint8_t vdev_id);
  1220. void (*flow_pool_unmap_handler)(struct cdp_soc_t *soc,
  1221. uint8_t pdev_id,
  1222. uint8_t vdev_id);
  1223. QDF_STATUS (*register_pause_cb)(struct cdp_soc_t *soc,
  1224. tx_pause_callback);
  1225. void (*set_desc_global_pool_size)(uint32_t num_msdu_desc);
  1226. void (*dump_flow_pool_info)(struct cdp_soc_t *soc_hdl);
  1227. bool (*tx_desc_thresh_reached)(struct cdp_soc_t *soc_hdl,
  1228. uint8_t vdev_id);
  1229. };
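/*
 * A minimal sketch of flow-control (v2) pool management per vdev: map a pool
 * when the vdev comes up and unmap it on teardown. The helper names are
 * hypothetical; soc->ops->flowctl_ops assumes the cdp_ops aggregation defined
 * later in this header.
 */
#if 0
static inline QDF_STATUS
example_vdev_flow_pool_up(ol_txrx_soc_handle soc, uint8_t pdev_id,
                          uint8_t vdev_id)
{
        if (!soc || !soc->ops || !soc->ops->flowctl_ops ||
            !soc->ops->flowctl_ops->flow_pool_map_handler)
                return QDF_STATUS_E_FAILURE;

        return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev_id,
                                                            vdev_id);
}

static inline void
example_vdev_flow_pool_down(ol_txrx_soc_handle soc, uint8_t pdev_id,
                            uint8_t vdev_id)
{
        if (!soc || !soc->ops || !soc->ops->flowctl_ops ||
            !soc->ops->flowctl_ops->flow_pool_unmap_handler)
                return;

        soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev_id, vdev_id);
}
#endif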
/**
 * struct cdp_lflowctl_ops - mcl legacy flow control ops
 * @register_tx_flow_control: Register tx flow control callback
 * @set_vdev_tx_desc_limit: Set tx descriptor limit for a vdev
 * @set_vdev_os_queue_status: Set vdev queue status
 * @deregister_tx_flow_control_cb: Deregister tx flow control callback
 * @flow_control_cb: Call osif flow control callback
 * @get_tx_resource: Get tx resources and compare against the watermark
 * @ll_set_tx_pause_q_depth: set pause queue depth
 * @vdev_flush: Flush all packets on a particular vdev
 * @vdev_pause: Pause a particular vdev
 * @vdev_unpause: Unpause a particular vdev
 *
 * Function pointers for operations related to flow control
 */
struct cdp_lflowctl_ops {
#ifdef QCA_HL_NETDEV_FLOW_CONTROL
        int (*register_tx_flow_control)(struct cdp_soc_t *soc_hdl,
                                        uint8_t pdev_id,
                                        tx_pause_callback flowcontrol);
        int (*set_vdev_tx_desc_limit)(struct cdp_soc_t *soc_hdl,
                                      uint8_t vdev_id, uint32_t chan_freq);
        int (*set_vdev_os_queue_status)(struct cdp_soc_t *soc_hdl,
                                        uint8_t vdev_id,
                                        enum netif_action_type action);
#else
        int (*register_tx_flow_control)(
                struct cdp_soc_t *soc_hdl,
                uint8_t vdev_id,
                ol_txrx_tx_flow_control_fp flowControl, void *osif_fc_ctx,
                ol_txrx_tx_flow_control_is_pause_fp flow_control_is_pause);
#endif /* QCA_HL_NETDEV_FLOW_CONTROL */
        int (*deregister_tx_flow_control_cb)(struct cdp_soc_t *soc_hdl,
                                             uint8_t vdev_id);
        void (*flow_control_cb)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                                bool tx_resume);
        bool (*get_tx_resource)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                struct qdf_mac_addr peer_addr,
                                unsigned int low_watermark,
                                unsigned int high_watermark_offset);
        int (*ll_set_tx_pause_q_depth)(struct cdp_soc_t *soc, uint8_t vdev_id,
                                       int pause_q_depth);
        void (*vdev_flush)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
        void (*vdev_pause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                           uint32_t reason, uint32_t pause_type);
        void (*vdev_unpause)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                             uint32_t reason, uint32_t pause_type);
};
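
/*
 * Example (illustrative sketch only, not part of this API): pausing and later
 * unpausing a vdev through the legacy flow-control table. "soc", "vdev_id",
 * "reason" and "pause_type" are hypothetical caller-side values; the reason
 * and pause-type encodings are defined elsewhere in the driver.
 *
 *      if (soc->ops->l_flowctl_ops && soc->ops->l_flowctl_ops->vdev_pause)
 *              soc->ops->l_flowctl_ops->vdev_pause(soc, vdev_id,
 *                                                  reason, pause_type);
 *
 *      ... critical section during which tx on the vdev stays paused ...
 *
 *      if (soc->ops->l_flowctl_ops && soc->ops->l_flowctl_ops->vdev_unpause)
 *              soc->ops->l_flowctl_ops->vdev_unpause(soc, vdev_id,
 *                                                    reason, pause_type);
 */
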
/**
 * struct cdp_throttle_ops - mcl throttle ops
 * @throttle_init_period: handler to initialize tx throttle time
 * @throttle_set_level: handler to set tx throttle level
 */
struct cdp_throttle_ops {
        void (*throttle_init_period)(struct cdp_soc_t *soc_hdl,
                                     uint8_t pdev_id, int period,
                                     uint8_t *dutycycle_level);
        void (*throttle_set_level)(struct cdp_soc_t *soc_hdl,
                                   uint8_t pdev_id, int level);
};
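
/*
 * Example (illustrative sketch only, not part of this API): initializing the
 * throttle period table and forcing a throttle level. "soc", "pdev_id",
 * "period", "dutycycle_levels" and "level" are hypothetical caller values.
 *
 *      if (soc->ops->throttle_ops &&
 *          soc->ops->throttle_ops->throttle_init_period)
 *              soc->ops->throttle_ops->throttle_init_period(soc, pdev_id,
 *                                                           period,
 *                                                           dutycycle_levels);
 *
 *      if (soc->ops->throttle_ops &&
 *          soc->ops->throttle_ops->throttle_set_level)
 *              soc->ops->throttle_ops->throttle_set_level(soc, pdev_id,
 *                                                         level);
 */
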
#endif

#ifdef IPA_OFFLOAD
/**
 * struct cdp_ipa_ops - mcl ipa data path ops
 * @ipa_get_resource: get IPA tx/rx resource information from the data path
 * @ipa_set_doorbell_paddr: set the IPA doorbell register physical address
 * @ipa_set_active: mark the IPA uC tx or rx path active/inactive
 * @ipa_op_response: handle an IPA uC operation response message
 * @ipa_register_op_cb: register a callback for IPA uC operation messages
 * @ipa_get_stat: request IPA uC statistics
 * @ipa_tx_data_frame: transmit a data frame over the IPA path
 * @ipa_set_uc_tx_partition_base: set the IPA uC tx partition base
 * @ipa_uc_get_share_stats: get IPA uC share statistics used for metering
 * @ipa_uc_set_quota: set the IPA uC quota limit in bytes
 * @ipa_enable_autonomy: enable IPA autonomy (rx delivered directly to IPA)
 * @ipa_disable_autonomy: disable IPA autonomy
 * @ipa_setup: set up the IPA WDI tx/rx pipes
 * @ipa_cleanup: tear down the IPA WDI tx/rx pipes
 * @ipa_setup_iface: set up an IPA interface for a session
 * @ipa_cleanup_iface: tear down an IPA interface
 * @ipa_enable_pipes: enable the IPA tx/rx pipes
 * @ipa_disable_pipes: disable the IPA tx/rx pipes
 * @ipa_set_perf_level: set the IPA client performance (bandwidth) level
 * @ipa_rx_intrabss_fwd: intra-BSS forward an rx frame received via IPA
 */
struct cdp_ipa_ops {
        QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
                                       uint8_t pdev_id);
        QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_soc_t *soc_hdl,
                                             uint8_t pdev_id);
        QDF_STATUS (*ipa_set_active)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                     bool uc_active, bool is_tx);
        QDF_STATUS (*ipa_op_response)(struct cdp_soc_t *soc_hdl,
                                      uint8_t pdev_id, uint8_t *op_msg);
        QDF_STATUS (*ipa_register_op_cb)(struct cdp_soc_t *soc_hdl,
                                         uint8_t pdev_id,
                                         void (*ipa_uc_op_cb_type)
                                         (uint8_t *op_msg, void *osif_ctxt),
                                         void *usr_ctxt);
        QDF_STATUS (*ipa_get_stat)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_soc_t *soc_hdl,
                                        uint8_t vdev_id, qdf_nbuf_t skb);
        void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
                                             uint32_t value);
#ifdef FEATURE_METERING
        QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_soc_t *soc_hdl,
                                             uint8_t pdev_id,
                                             uint8_t reset_stats);
        QDF_STATUS (*ipa_uc_set_quota)(struct cdp_soc_t *soc_hdl,
                                       uint8_t pdev_id, uint64_t quota_bytes);
#endif
        QDF_STATUS (*ipa_enable_autonomy)(struct cdp_soc_t *soc_hdl,
                                          uint8_t pdev_id);
        QDF_STATUS (*ipa_disable_autonomy)(struct cdp_soc_t *soc_hdl,
                                           uint8_t pdev_id);
#ifdef CONFIG_IPA_WDI_UNIFIED_API
        QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                void *ipa_i2w_cb, void *ipa_w2i_cb,
                                void *ipa_wdi_meter_notifier_cb,
                                uint32_t ipa_desc_size, void *ipa_priv,
                                bool is_rm_enabled, uint32_t *tx_pipe_handle,
                                uint32_t *rx_pipe_handle, bool is_smmu_enabled,
                                qdf_ipa_sys_connect_params_t *sys_in,
                                bool over_gsi);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
        QDF_STATUS (*ipa_setup)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                void *ipa_i2w_cb, void *ipa_w2i_cb,
                                void *ipa_wdi_meter_notifier_cb,
                                uint32_t ipa_desc_size, void *ipa_priv,
                                bool is_rm_enabled, uint32_t *tx_pipe_handle,
                                uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */
        QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
                                  uint32_t rx_pipe_handle);
        QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
                                      qdf_ipa_client_type_t prod_client,
                                      qdf_ipa_client_type_t cons_client,
                                      uint8_t session_id, bool is_ipv6_enabled);
        QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
        QDF_STATUS (*ipa_enable_pipes)(struct cdp_soc_t *soc_hdl,
                                       uint8_t pdev_id);
        QDF_STATUS (*ipa_disable_pipes)(struct cdp_soc_t *soc_hdl,
                                        uint8_t pdev_id);
        QDF_STATUS (*ipa_set_perf_level)(int client,
                                         uint32_t max_supported_bw_mbps);
        bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                                    qdf_nbuf_t nbuf, bool *fwd_success);
};
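
/*
 * Example (illustrative sketch only, not part of this API): one plausible
 * bring-up order for the ops above - query resources, program the doorbell,
 * then enable the pipes. "soc" and "pdev_id" are hypothetical; the real
 * sequencing is owned by the IPA glue layer and may differ.
 *
 *      QDF_STATUS status = QDF_STATUS_E_NOSUPPORT;
 *
 *      if (soc->ops->ipa_ops && soc->ops->ipa_ops->ipa_get_resource)
 *              status = soc->ops->ipa_ops->ipa_get_resource(soc, pdev_id);
 *      if (QDF_IS_STATUS_SUCCESS(status))
 *              status = soc->ops->ipa_ops->ipa_set_doorbell_paddr(soc,
 *                                                                 pdev_id);
 *      if (QDF_IS_STATUS_SUCCESS(status))
 *              status = soc->ops->ipa_ops->ipa_enable_pipes(soc, pdev_id);
 */
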
#endif

#ifdef DP_POWER_SAVE
/**
 * struct cdp_tx_delay_ops - mcl tx delay ops
 * @tx_delay: handler to get tx packet delay
 * @tx_delay_hist: handler to get tx packet delay histogram
 * @tx_packet_count: handler to get tx packet count
 * @tx_set_compute_interval: update compute interval period for TSM stats
 *
 * Function pointers for operations related to tx delay.
 */
struct cdp_tx_delay_ops {
        void (*tx_delay)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                         uint32_t *queue_delay_microsec,
                         uint32_t *tx_delay_microsec, int category);
        void (*tx_delay_hist)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                              uint16_t *bin_values, int category);
        void (*tx_packet_count)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                uint16_t *out_packet_count,
                                uint16_t *out_packet_loss_count, int category);
        void (*tx_set_compute_interval)(struct cdp_soc_t *soc_hdl,
                                        uint8_t pdev_id, uint32_t interval);
};
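
/*
 * Example (illustrative sketch only, not part of this API): reading the
 * current tx delay figures for one traffic category. "soc", "pdev_id" and
 * "category" are hypothetical caller-side values; the delays are returned in
 * microseconds per the prototypes above.
 *
 *      uint32_t queue_delay_us = 0, tx_delay_us = 0;
 *
 *      if (soc->ops->delay_ops && soc->ops->delay_ops->tx_delay)
 *              soc->ops->delay_ops->tx_delay(soc, pdev_id, &queue_delay_us,
 *                                            &tx_delay_us, category);
 */
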
/**
 * struct cdp_bus_ops - mcl bus suspend/resume ops
 * @bus_suspend: handler for bus suspend
 * @bus_resume: handler for bus resume
 * @process_wow_ack_rsp: handler for wow ack response
 * @process_target_suspend_req: handler for target suspend request
 */
struct cdp_bus_ops {
        QDF_STATUS (*bus_suspend)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        QDF_STATUS (*bus_resume)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        void (*process_wow_ack_rsp)(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
        void (*process_target_suspend_req)(struct cdp_soc_t *soc_hdl,
                                           uint8_t pdev_id);
};
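
/*
 * Example (illustrative sketch only, not part of this API): a bus-suspend
 * attempt that resumes on failure. "soc" and "pdev_id" are hypothetical; the
 * real suspend/resume flow is driven from the platform suspend path.
 *
 *      if (soc->ops->bus_ops && soc->ops->bus_ops->bus_suspend) {
 *              QDF_STATUS status;
 *
 *              status = soc->ops->bus_ops->bus_suspend(soc, pdev_id);
 *              if (QDF_IS_STATUS_ERROR(status) &&
 *                  soc->ops->bus_ops->bus_resume)
 *                      soc->ops->bus_ops->bus_resume(soc, pdev_id);
 *      }
 */
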
#endif

#ifdef RECEIVE_OFFLOAD
/**
 * struct cdp_rx_offld_ops - mcl host receive offload ops
 * @register_rx_offld_flush_cb: register the receive-offload (LRO/GRO) flush
 *                              callback
 * @deregister_rx_offld_flush_cb: deregister the receive-offload flush callback
 */
struct cdp_rx_offld_ops {
        void (*register_rx_offld_flush_cb)(void (rx_offld_flush_cb)(void *));
        void (*deregister_rx_offld_flush_cb)(void);
};
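
/*
 * Example (illustrative sketch only, not part of this API): registering a
 * receive-offload flush callback. "my_rx_offld_flush" is a hypothetical
 * caller-defined function matching the void (*)(void *) prototype above.
 *
 *      static void my_rx_offld_flush(void *data)
 *      {
 *              ... flush any pending rx aggregation for this context ...
 *      }
 *
 *      if (soc->ops->rx_offld_ops &&
 *          soc->ops->rx_offld_ops->register_rx_offld_flush_cb)
 *              soc->ops->rx_offld_ops->register_rx_offld_flush_cb(
 *                                                      my_rx_offld_flush);
 */
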
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/**
 * struct cdp_cfr_ops - host cfr ops
 * @txrx_cfr_filter: Handler to configure host rx monitor status ring
 * @txrx_get_cfr_rcc: Handler to get CFR mode
 * @txrx_set_cfr_rcc: Handler to enable/disable CFR mode
 * @txrx_get_cfr_dbg_stats: Handler to get debug statistics for CFR mode
 * @txrx_clear_cfr_dbg_stats: Handler to clear debug statistics for CFR mode
 * @txrx_enable_mon_reap_timer: Enable/Disable reap timer of monitor status ring
 */
struct cdp_cfr_ops {
        void (*txrx_cfr_filter)(struct cdp_soc_t *soc_hdl,
                                uint8_t pdev_id,
                                bool enable,
                                struct cdp_monitor_filter *filter_val);
        bool (*txrx_get_cfr_rcc)(struct cdp_soc_t *soc_hdl,
                                 uint8_t pdev_id);
        void (*txrx_set_cfr_rcc)(struct cdp_soc_t *soc_hdl,
                                 uint8_t pdev_id,
                                 bool enable);
        void (*txrx_get_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
                                       uint8_t pdev_id,
                                       struct cdp_cfr_rcc_stats *buf);
        void (*txrx_clear_cfr_dbg_stats)(struct cdp_soc_t *soc_hdl,
                                         uint8_t pdev_id);
        void (*txrx_enable_mon_reap_timer)(struct cdp_soc_t *soc_hdl,
                                           uint8_t pdev_id,
                                           bool enable);
};
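
/*
 * Example (illustrative sketch only, not part of this API): enabling CFR RCC
 * mode and fetching its debug counters. "soc" and "pdev_id" are hypothetical
 * caller-side values.
 *
 *      struct cdp_cfr_rcc_stats rcc_stats = {0};
 *
 *      if (soc->ops->cfr_ops && soc->ops->cfr_ops->txrx_set_cfr_rcc)
 *              soc->ops->cfr_ops->txrx_set_cfr_rcc(soc, pdev_id, true);
 *
 *      if (soc->ops->cfr_ops && soc->ops->cfr_ops->txrx_get_cfr_dbg_stats)
 *              soc->ops->cfr_ops->txrx_get_cfr_dbg_stats(soc, pdev_id,
 *                                                        &rcc_stats);
 */
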
#endif

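/*
 * struct cdp_ops - top-level ops table registered by a data path
 * implementation; it aggregates the per-feature ops tables declared above.
 * Members guarded by config macros are present only when the corresponding
 * feature is compiled in.
 */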
struct cdp_ops {
        struct cdp_cmn_ops *cmn_drv_ops;
        struct cdp_ctrl_ops *ctrl_ops;
        struct cdp_me_ops *me_ops;
        struct cdp_mon_ops *mon_ops;
        struct cdp_host_stats_ops *host_stats_ops;
        struct cdp_wds_ops *wds_ops;
        struct cdp_raw_ops *raw_ops;
        struct cdp_pflow_ops *pflow_ops;
#ifdef DP_PEER_EXTENDED_API
        struct cdp_misc_ops *misc_ops;
        struct cdp_peer_ops *peer_ops;
        struct cdp_ocb_ops *ocb_ops;
        struct cdp_mob_stats_ops *mob_stats_ops;
        struct cdp_pmf_ops *pmf_ops;
#endif
#ifdef DP_FLOW_CTL
        struct cdp_cfg_ops *cfg_ops;
        struct cdp_flowctl_ops *flowctl_ops;
        struct cdp_lflowctl_ops *l_flowctl_ops;
        struct cdp_throttle_ops *throttle_ops;
#endif
#ifdef DP_POWER_SAVE
        struct cdp_bus_ops *bus_ops;
        struct cdp_tx_delay_ops *delay_ops;
#endif
#ifdef IPA_OFFLOAD
        struct cdp_ipa_ops *ipa_ops;
#endif
#ifdef RECEIVE_OFFLOAD
        struct cdp_rx_offld_ops *rx_offld_ops;
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
        struct cdp_cfr_ops *cfr_ops;
#endif
};
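
/*
 * Example (illustrative sketch only, not part of this API): how a data path
 * implementation might publish its ops table. The "dp_*_ops" instances shown
 * here are hypothetical; a real implementation populates many more members,
 * includes the conditionally compiled tables it supports, and attaches the
 * table to the soc handle during soc initialization.
 *
 *      static struct cdp_ops dp_txrx_ops = {
 *              .cmn_drv_ops = &dp_cmn_ops,
 *              .ctrl_ops = &dp_ctrl_ops,
 *              .host_stats_ops = &dp_host_stats_ops,
 *      };
 */
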
#endif