/*
 * Copyright (c) 2011-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * @file cdp_txrx_cmn.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_H_
#define _CDP_TXRX_CMN_H_

#include "qdf_types.h"
#include "qdf_nbuf.h"
#include "cdp_txrx_ops.h"
#include "cdp_txrx_handle.h"
#include "cdp_txrx_cmn_struct.h"

#ifdef ENABLE_VERBOSE_DEBUG
extern bool is_dp_verbose_debug_enabled;
#endif

/******************************************************************************
 *
 * Common Data Path Header File
 *
 *****************************************************************************/

#define dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP, params)
#define dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP, params)
#define dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP, params)
#define dp_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP, ## params)
#define dp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params)

#ifdef ENABLE_VERBOSE_DEBUG
/**
 * enum verbose_debug_module - bit positions that the INI item
 * "enable_verbose_debug" must set to enable excessive logging for the
 * respective module
 *
 * @hif_verbose_debug_mask: 1st bit [0th index] is for the HIF module
 * @hal_verbose_debug_mask: 2nd bit [1st index] is for the HAL module
 * @dp_verbose_debug_mask: 3rd bit [2nd index] is for the DP module
 */
enum verbose_debug_module {
	hif_vebose_debug_mask = 1 << 0,
	hal_verbose_debug_mask = 1 << 1,
	dp_verbose_debug_mask = 1 << 2,
};

#define dp_verbose_debug(params...) \
	if (unlikely(is_dp_verbose_debug_enabled)) \
		do {\
			QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params); \
		} while (0)
#else
#define dp_verbose_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP, params)
#endif
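
/*
 * Illustrative note (not part of the original header): the enum values
 * above combine as a bit mask in the "enable_verbose_debug" INI item.
 * A minimal sketch, assuming the INI value is parsed into an unsigned
 * integer named ini_verbose_debug:
 *
 *	uint32_t ini_verbose_debug =
 *		hal_verbose_debug_mask | dp_verbose_debug_mask;	// 0x6
 *	// enables excessive logging for HAL and DP, leaves HIF quiet
 */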

#define dp_alert_rl(params...) QDF_TRACE_FATAL_RL(QDF_MODULE_ID_DP, params)
#define dp_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP, params)
#define dp_warn_rl(params...) QDF_TRACE_WARN_RL(QDF_MODULE_ID_DP, params)
#define dp_info_rl(params...) QDF_TRACE_INFO_RL(QDF_MODULE_ID_DP, params)
#define dp_debug_rl(params...) QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP, params)

/**
 * enum vdev_host_stats_id - stats ID that a host stats update from CDP
 * must be set to one of the following values
 *
 * @DP_VDEV_STATS_PKT_CNT_ONLY: update Tx packet count only
 * @DP_VDEV_STATS_TX_ME: update Tx ingress stats
 */
enum {
	DP_VDEV_STATS_PKT_CNT_ONLY,
	DP_VDEV_STATS_TX_ME,
};

static inline QDF_STATUS
cdp_soc_attach_target(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_INVAL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_attach_target)
		return QDF_STATUS_SUCCESS;

	return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
}
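
/*
 * Illustrative note (not part of the original header): every cdp_* wrapper
 * below follows the same dispatch pattern seen in cdp_soc_attach_target() -
 * validate soc and soc->ops, check that the relevant ops table and callback
 * are registered, then forward the call. A hypothetical caller-side check
 * could look like:
 *
 *	if (cdp_soc_attach_target(soc) != QDF_STATUS_SUCCESS)
 *		dp_err("txrx target attach failed");
 */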

static inline int
cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc);
}

static inline void
cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg)
		return;

	soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config);
}

static inline struct cdp_vdev *
cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_attach)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev,
			vdev_mac_addr, vdev_id, op_mode);
}
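
/*
 * Illustrative usage sketch (not part of the original header), assuming a
 * station-mode vdev; mac_addr and vdev_id are hypothetical caller values:
 *
 *	struct cdp_vdev *vdev;
 *
 *	vdev = cdp_vdev_attach(soc, pdev, mac_addr, vdev_id,
 *			       wlan_op_mode_sta);
 *	if (!vdev)
 *		dp_err("vdev attach failed");
 */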

#ifdef CONFIG_MCL
/**
 * cdp_flow_pool_map() - Create flow pool for vdev
 * @soc: data path soc handle
 * @pdev: physical device handle
 * @vdev_id: vdev_id corresponding to vdev start
 *
 * Create per vdev flow pool.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc,
					struct cdp_pdev *pdev, uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_INVAL;
	}

	if (!soc->ops->flowctl_ops ||
	    !soc->ops->flowctl_ops->flow_pool_map_handler)
		return QDF_STATUS_E_INVAL;

	return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id);
}

/**
 * cdp_flow_pool_unmap() - Delete flow pool
 * @soc: data path soc handle
 * @pdev: physical device handle
 * @vdev_id: vdev_id corresponding to vdev start
 *
 * Delete flow pool
 *
 * Return: none
 */
static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc,
					struct cdp_pdev *pdev, uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->flowctl_ops ||
	    !soc->ops->flowctl_ops->flow_pool_unmap_handler)
		return;

	return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev,
							vdev_id);
}
#endif
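
/*
 * Illustrative note (not part of the original header): under CONFIG_MCL a
 * typical caller pairs these around vdev start/stop. A minimal sketch,
 * assuming the vdev_id used at cdp_vdev_attach() time:
 *
 *	if (cdp_flow_pool_map(soc, pdev, vdev_id) != QDF_STATUS_SUCCESS)
 *		dp_err("flow pool creation failed for vdev %u", vdev_id);
 *	...
 *	cdp_flow_pool_unmap(soc, pdev, vdev_id);
 */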

static inline void
cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	 ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_detach)
		return;

	soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev,
						callback, cb_context);
}

static inline int
cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_attach_target)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev);
}

static inline struct cdp_pdev *cdp_pdev_attach
	(ol_txrx_soc_handle soc, struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
	HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_attach)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
			htc_pdev, osdev, pdev_id);
}

static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_post_attach)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);
}

static inline void
cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach)
		return;

	soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force);
}

static inline void
cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_detach)
		return;

	soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force);
}

static inline void
cdp_pdev_deinit(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_deinit)
		return;

	soc->ops->cmn_drv_ops->txrx_pdev_deinit(pdev, force);
}

static inline void *cdp_peer_create
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_create)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_peer_create(vdev,
			peer_mac_addr, ctrl_peer);
}

static inline void cdp_peer_setup
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_setup)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_setup(vdev,
					       peer);
}

/*
 * cdp_cp_peer_del_response() - Call the peer delete response handler
 * @soc: Datapath SOC handle
 * @vdev_hdl: virtual device object
 * @peer_mac_addr: MAC address of the peer
 *
 * Return: void
 */
static inline void cdp_cp_peer_del_response
	(ol_txrx_soc_handle soc,
	 struct cdp_vdev *vdev_hdl,
	 uint8_t *peer_mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_cp_peer_del_response)
		return;

	return soc->ops->cmn_drv_ops->txrx_cp_peer_del_response(soc,
								vdev_hdl,
								peer_mac_addr);
}

/**
 * cdp_peer_get_ast_info_by_soc() - search the soc AST hash table and return
 *				    the information of the first AST entry
 *				    found in the table with the given MAC
 *				    address
 * @soc: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @ast_entry_info: ast entry information
 *
 * Return: true if an AST entry is found with ast_mac_addr,
 *	   false if no AST entry is found
 */
static inline bool cdp_peer_get_ast_info_by_soc
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_soc)
		return false;

	return soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_soc
					(soc, ast_mac_addr,
					 ast_entry_info);
}
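
/*
 * Illustrative usage sketch (not part of the original header), assuming a
 * caller that only needs to know whether an AST entry exists for a MAC
 * address; ast_mac is a hypothetical six-byte array:
 *
 *	struct cdp_ast_entry_info info = {0};
 *
 *	if (cdp_peer_get_ast_info_by_soc(soc, ast_mac, &info))
 *		dp_info("AST entry present for the given MAC");
 */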

/**
 * cdp_peer_get_ast_info_by_pdev() - search the soc AST hash table and return
 *				     the AST entry information if both the MAC
 *				     address and pdev_id match
 * @soc: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @pdev_id: pdev_id
 * @ast_entry_info: ast entry information
 *
 * Return: true if an AST entry is found with ast_mac_addr,
 *	   false if no AST entry is found
 */
static inline bool cdp_peer_get_ast_info_by_pdev
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	 uint8_t pdev_id,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return false;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_pdev)
		return false;

	return soc->ops->cmn_drv_ops->txrx_peer_get_ast_info_by_pdev
					(soc,
					 ast_mac_addr,
					 pdev_id,
					 ast_entry_info);
}

/**
 * cdp_peer_ast_delete_by_soc() - delete the AST entry with the given MAC
 *				  address from the soc AST hash table
 * @soc: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @callback: callback function to be called on AST delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if an AST entry is found with ast_mac_addr and
 *	   the delete command is sent,
 *	   QDF_STATUS_E_INVAL if no AST entry is found
 */
static inline QDF_STATUS cdp_peer_ast_delete_by_soc
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	 txrx_ast_free_cb callback,
	 void *cookie)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_INVAL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_soc)
		return QDF_STATUS_E_INVAL;

	return soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_soc
							(soc,
							 ast_mac_addr,
							 callback,
							 cookie);
}

/**
 * cdp_peer_ast_delete_by_pdev() - delete the AST entry from the soc AST hash
 *				   table if both the MAC address and pdev_id
 *				   match
 * @soc: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @pdev_id: pdev id
 * @callback: callback function to be called on AST delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if an AST entry is found with ast_mac_addr and
 *	   the delete command is sent,
 *	   QDF_STATUS_E_INVAL if no AST entry is found
 */
static inline QDF_STATUS cdp_peer_ast_delete_by_pdev
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr,
	 uint8_t pdev_id, txrx_ast_free_cb callback,
	 void *cookie)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_INVAL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_pdev)
		return QDF_STATUS_E_INVAL;

	return soc->ops->cmn_drv_ops->txrx_peer_ast_delete_by_pdev
					(soc,
					 ast_mac_addr,
					 pdev_id,
					 callback,
					 cookie);
}

static inline int cdp_peer_add_ast
	(ol_txrx_soc_handle soc, struct cdp_peer *peer_handle,
	uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_add_ast)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc,
							peer_handle,
							mac_addr,
							type,
							flags);
}

static inline void cdp_peer_reset_ast
	(ol_txrx_soc_handle soc, uint8_t *wds_macaddr, uint8_t *peer_macaddr,
	 void *vdev_hdl)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_reset_ast)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr,
						   peer_macaddr, vdev_hdl);
}

static inline void cdp_peer_reset_ast_table
	(ol_txrx_soc_handle soc, void *vdev_hdl)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc, vdev_hdl);
}

static inline void cdp_peer_flush_ast_table
	(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc);
}

static inline int cdp_peer_update_ast
	(ol_txrx_soc_handle soc, uint8_t *wds_macaddr,
	 struct cdp_peer *peer_handle, uint32_t flags)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_update_ast)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc,
							   peer_handle,
							   wds_macaddr,
							   flags);
}

static inline void cdp_peer_teardown
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_teardown)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer);
}

static inline void
cdp_vdev_flush_peers(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		     bool unmap_only)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_flush_peers)
		return;

	soc->ops->cmn_drv_ops->txrx_vdev_flush_peers(vdev, unmap_only);
}

static inline void
cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_delete)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap);
}

static inline void
cdp_peer_delete_sync(ol_txrx_soc_handle soc, void *peer,
		     QDF_STATUS(*delete_cb)(
					uint8_t vdev_id,
					uint32_t peerid_cnt,
					uint16_t *peerid_list),
		     uint32_t bitmap)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_delete_sync)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_delete_sync(peer,
						     delete_cb,
						     bitmap);
}

static inline int
cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		     uint8_t smart_monitor)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_monitor_mode)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev,
							    smart_monitor);
}

static inline void
cdp_set_curchan(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev,
	uint32_t chan_mhz)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_curchan)
		return;

	soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz);
}

static inline void
cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
			void *filter, uint32_t num)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_privacy_filters)
		return;

	soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev,
			filter, num);
}

static inline int
cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
		       struct cdp_monitor_filter *filter_val)
{
	if (soc->ops->mon_ops->txrx_set_advance_monitor_filter)
		return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev,
								filter_val);
	return 0;
}

/******************************************************************************
 * Data Interface (B Interface)
 *****************************************************************************/
static inline void
cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		  void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
		  struct ol_txrx_ops *txrx_ops)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_register)
		return;

	soc->ops->cmn_drv_ops->txrx_vdev_register(vdev,
			osif_vdev, ctrl_vdev, txrx_ops);
}

static inline int
cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	      qdf_nbuf_t tx_mgmt_frm, uint8_t type)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_send)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev,
			tx_mgmt_frm, type);
}

static inline int
cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
		  uint8_t use_6mbps, uint16_t chanfreq)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext
			(vdev, tx_mgmt_frm, type, use_6mbps, chanfreq);
}

static inline void
cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
		   uint8_t type, ol_txrx_mgmt_tx_cb download_cb,
		   ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set)
		return;

	soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set
			(pdev, type, download_cb, ota_ack_cb, ctxt);
}

static inline void
cdp_peer_unmap_sync_cb_set(ol_txrx_soc_handle soc,
			   struct cdp_pdev *pdev,
			   QDF_STATUS(*unmap_resp_cb)(
					uint8_t vdev_id,
					uint32_t peerid_cnt,
					uint16_t *peerid_list))
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set)
		return;

	soc->ops->cmn_drv_ops->txrx_peer_unmap_sync_cb_set(pdev, unmap_resp_cb);
}

static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc,
				     struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_tx_pending)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
}

static inline void
cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev,
		   ol_txrx_data_tx_cb callback, void *ctxt)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set)
		return;

	soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev,
						   callback, ctxt);
}

/******************************************************************************
 * Statistics and Debugging Interface (C Interface)
 *****************************************************************************/
/**
 * External Device physical address types
 *
 * Currently, both MAC and IPA uController use the same size addresses
 * and descriptors are exchanged between these two depending on the mode.
 *
 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA
 * operations. However, external device physical address sizes
 * may be different from host-specific physical address sizes.
 * This calls for the following definitions for target devices
 * (MAC, IPA uc).
 */
#if HTT_PADDR64
typedef uint64_t target_paddr_t;
#else
typedef uint32_t target_paddr_t;
#endif /* HTT_PADDR64 */

static inline int
cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	     int max_subfrms_ampdu,
	     int max_subfrms_amsdu)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_aggr_cfg)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev,
			max_subfrms_ampdu, max_subfrms_amsdu);
}

static inline int
cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		 struct ol_txrx_stats_req *req, bool per_vdev,
		 bool response_expected)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_fw_stats_get)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req,
			per_vdev, response_expected);
}

static inline int
cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_debug)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs);
}

static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc,
	 struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg)
		return;

	soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev,
			cfg_stats_type, cfg_val);
}

static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_print_level_set)
		return;

	soc->ops->cmn_drv_ops->txrx_print_level_set(level);
}

static inline uint8_t *
cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev);
}

/**
 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
 *				    vdev
 * @vdev: vdev handle
 *
 * Return: Handle to struct qdf_mac_addr
 */
static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr
			(vdev);
}

/**
 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to pdev
 */
static inline struct cdp_pdev *cdp_get_pdev_from_vdev
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev);
}

/**
 * cdp_get_os_rx_handles_from_vdev() - Return os rx handles for a vdev
 * @soc: ol_txrx_soc_handle handle
 * @vdev: vdev for which os rx handles are needed
 * @stack_fn_p: pointer to stack function pointer
 * @osif_handle_p: pointer to ol_osif_vdev_handle
 *
 * Return: void
 */
static inline
void cdp_get_os_rx_handles_from_vdev(ol_txrx_soc_handle soc,
				     struct cdp_vdev *vdev,
				     ol_txrx_rx_fp *stack_fn_p,
				     ol_osif_vdev_handle *osif_handle_p)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev)
		return;

	soc->ops->cmn_drv_ops->txrx_get_os_rx_handles_from_vdev(vdev,
								stack_fn_p,
								osif_handle_p);
}
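
/*
 * Illustrative usage sketch (not part of the original header): a caller
 * that wants to deliver frames directly to the OS shim can fetch the
 * registered rx callback and osif handle like this (nbuf_list is a
 * hypothetical frame list owned by the caller):
 *
 *	ol_txrx_rx_fp stack_fn = NULL;
 *	ol_osif_vdev_handle osif_vdev = NULL;
 *
 *	cdp_get_os_rx_handles_from_vdev(soc, vdev, &stack_fn, &osif_vdev);
 *	if (stack_fn)
 *		stack_fn(osif_vdev, nbuf_list);
 */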

/**
 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to control pdev
 */
static inline struct cdp_cfg *
cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev
			(vdev);
}

static inline struct cdp_vdev *
cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
			  uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id
			(pdev, vdev_id);
}

static inline struct cdp_vdev *
cdp_get_mon_vdev_from_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_get_mon_vdev_from_pdev
			(pdev);
}

static inline void
cdp_soc_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_detach)
		return;

	soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc);
}

/**
 * cdp_soc_init() - Initialize txrx SOC
 * @soc: ol_txrx_soc_handle handle
 * @devid: Device ID
 * @hif_handle: Opaque HIF handle
 * @psoc: Opaque Objmgr handle
 * @htc_handle: Opaque HTC handle
 * @qdf_dev: QDF device
 * @dp_ol_if_ops: Offload Operations
 *
 * Return: DP SOC handle on success, NULL on failure
 */
static inline ol_txrx_soc_handle
cdp_soc_init(ol_txrx_soc_handle soc, u_int16_t devid, void *hif_handle,
	     void *psoc, void *htc_handle, qdf_device_t qdf_dev,
	     struct ol_if_ops *dp_ol_if_ops)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_init)
		return NULL;

	return soc->ops->cmn_drv_ops->txrx_soc_init(soc, psoc,
						    hif_handle,
						    htc_handle, qdf_dev,
						    dp_ol_if_ops, devid);
}
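
/*
 * Illustrative bring-up sketch (not part of the original header), assuming
 * the handles come from the caller's HIF/HTC/objmgr attach path; ordering
 * only, error handling elided:
 *
 *	soc = cdp_soc_init(soc, devid, hif_handle, psoc, htc_handle,
 *			   qdf_dev, dp_ol_if_ops);
 *	if (soc && cdp_soc_attach_target(soc) != QDF_STATUS_SUCCESS)
 *		dp_err("target attach failed");
 *	...
 *	cdp_soc_deinit(soc);
 *	cdp_soc_detach(soc);
 */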

/**
 * cdp_soc_deinit() - Deinitialize txrx SOC
 * @soc: Opaque DP SOC handle
 *
 * Return: None
 */
static inline void
cdp_soc_deinit(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_deinit)
		return;

	soc->ops->cmn_drv_ops->txrx_soc_deinit((void *)soc);
}

/**
 * cdp_tso_soc_attach() - TSO attach function
 * @soc: ol_txrx_soc_handle handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_SUCCESS on Success or
 *	   QDF_STATUS_E_FAILURE on failure
 */
static inline QDF_STATUS
cdp_tso_soc_attach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_tso_soc_attach)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_tso_soc_attach((void *)soc);
}

/**
 * cdp_tso_soc_detach() - TSO detach function
 * @soc: ol_txrx_soc_handle handle
 *
 * Release TSO descriptor buffers
 *
 * Return: QDF_STATUS_SUCCESS on Success or
 *	   QDF_STATUS_E_FAILURE on failure
 */
static inline QDF_STATUS
cdp_tso_soc_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_tso_soc_detach)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_tso_soc_detach((void *)soc);
}

/**
 * cdp_addba_resp_tx_completion() - Indicate addba response tx
 *				    completion to dp to change tid state.
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: tid
 * @status: Tx completion status
 *
 * Return: success/failure of tid update
 */
static inline int cdp_addba_resp_tx_completion(ol_txrx_soc_handle soc,
					       void *peer_handle,
					       uint8_t tid, int status)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_resp_tx_completion)
		return 0;

	return soc->ops->cmn_drv_ops->addba_resp_tx_completion(peer_handle, tid,
							       status);
}

static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t dialogtoken, uint16_t tid,
	uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_requestprocess)
		return 0;

	return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
			dialogtoken, tid, batimeout, buffersize, startseqnum);
}

static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
	uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_responsesetup)
		return;

	soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
			dialogtoken, statuscode, buffersize, batimeout);
}

static inline int cdp_delba_process(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t reasoncode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;

	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
			tid, reasoncode);
}

/**
 * cdp_delba_tx_completion() - Handle delba tx completion to update stats
 *			       and retry transmission if failed.
 * @soc: soc handle
 * @peer_handle: peer handle
 * @tid: Tid number
 * @status: Tx completion status
 *
 * Return: 0 on Success, 1 on failure
 */
static inline int cdp_delba_tx_completion(ol_txrx_soc_handle soc,
					  void *peer_handle,
					  uint8_t tid, int status)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_tx_completion)
		return 0;

	return soc->ops->cmn_drv_ops->delba_tx_completion(peer_handle,
							  tid, status);
}

static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;

	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id() - return the vdev id and peer MAC address
 *				    for a given peer id
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
		uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;

	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
				peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map() - set the DSCP-to-tid map in the vap
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
				map_id);
}

/**
 * cdp_ath_get_total_per() - get the hw retries
 * @soc: soc handle
 * @pdev: pdev handle
 *
 * Return: hw retries
 */
static inline
int cdp_ath_get_total_per(ol_txrx_soc_handle soc,
			  struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_total_per)
		return 0;

	return soc->ops->cmn_drv_ops->txrx_get_total_per(pdev);
}

/**
 * cdp_set_pdev_dscp_tid_map() - change tid values in the DSCP-to-tid map
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}

	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
		return;

	soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
			map_id, tos, tid);
}
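
/*
 * Illustrative usage sketch (not part of the original header), assuming
 * map_id 0 and that @tos indexes the map by DSCP value: steer DSCP 46
 * (Expedited Forwarding) to tid 6, then bind map 0 to the vap:
 *
 *	cdp_set_pdev_dscp_tid_map(soc, pdev, 0, 46, 6);
 *	cdp_set_vdev_dscp_tid_map(soc, vdev, 0);
 */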
/**
 * cdp_hmmc_tid_override_en(): Function to enable hmmc tid override.
 * @soc: soc handle
 * @pdev: pdev handle
 * @val: hmmc-dscp flag value
 *
 * Return: void
 */
static inline void cdp_hmmc_tid_override_en(ol_txrx_soc_handle soc,
                struct cdp_pdev *pdev, bool val)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->hmmc_tid_override_en)
                return;

        soc->ops->cmn_drv_ops->hmmc_tid_override_en(pdev, val);
}

/**
 * cdp_set_hmmc_tid_val(): Function to set hmmc tid value.
 * @soc: soc handle
 * @pdev: pdev handle
 * @tid: tid value
 *
 * Return: void
 */
static inline void cdp_set_hmmc_tid_val(ol_txrx_soc_handle soc,
                struct cdp_pdev *pdev, uint8_t tid)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_hmmc_tid_val)
                return;

        soc->ops->cmn_drv_ops->set_hmmc_tid_val(pdev, tid);
}

/**
 * cdp_flush_cache_rx_queue() - flush cache rx queue frame
 * @soc: soc handle
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
                return;

        soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats_request(): function to map to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @req: stats request container
 *
 * Return: status
 */
static inline
int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
                           struct cdp_txrx_stats_req *req)
{
        if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_ASSERT(0);
                return 0;
        }

        if (soc->ops->cmn_drv_ops->txrx_stats_request)
                return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req);

        return 0;
}

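/*
 * Example (illustrative sketch, not part of the CDP API): issue a host/FW
 * stats request against a vdev. The request is zero-initialised here; which
 * members of struct cdp_txrx_stats_req must be filled in depends on the
 * statistics being requested, so example_soc/example_vdev and the empty
 * request are placeholders.
 *
 *      static int example_request_stats(ol_txrx_soc_handle example_soc,
 *                                       struct cdp_vdev *example_vdev)
 *      {
 *              struct cdp_txrx_stats_req req = {0};
 *
 *              // Populate the requested stats id/parameters here before
 *              // handing the request to the datapath.
 *              return cdp_txrx_stats_request(example_soc, example_vdev, &req);
 *      }
 */
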
/**
 * cdp_txrx_intr_attach(): function to attach and configure interrupt
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_intr_attach)
                return 0;

        return soc->ops->cmn_drv_ops->txrx_intr_attach(soc);
}

/**
 * cdp_txrx_intr_detach(): function to detach interrupt
 * @soc: soc handle
 *
 * Return: void
 */
static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_intr_detach)
                return;

        soc->ops->cmn_drv_ops->txrx_intr_detach(soc);
}

/**
 * cdp_display_stats(): function to map to dump stats
 * @soc: soc handle
 * @value: statistics option
 * @level: verbosity level for the stats dump
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value,
                  enum qdf_stats_verbosity_level level)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->display_stats)
                return 0;

        return soc->ops->cmn_drv_ops->display_stats(soc, value, level);
}

/**
 * cdp_set_pn_check(): function to set pn check
 * @soc: soc handle
 * @vdev: vdev handle
 * @peer_handle: peer handle
 * @sec_type: security type
 * @rx_pn: receive pn
 *
 * Return: 0
 */
static inline int cdp_set_pn_check(ol_txrx_soc_handle soc,
                struct cdp_vdev *vdev, struct cdp_peer *peer_handle,
                enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_pn_check)
                return 0;

        soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle,
                        sec_type, rx_pn);
        return 0;
}

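/**
 * cdp_set_key(): function to set the security key for a peer
 * @soc: soc handle
 * @peer_handle: peer handle
 * @is_unicast: true for a unicast (pairwise) key, false for a group key
 * @key: pointer to the key material
 *
 * Return: 0
 */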
static inline int cdp_set_key(ol_txrx_soc_handle soc,
                              struct cdp_peer *peer_handle,
                              bool is_unicast, uint32_t *key)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }

        if (!soc->ops->ctrl_ops ||
            !soc->ops->ctrl_ops->set_key)
                return 0;

        soc->ops->ctrl_ops->set_key(peer_handle,
                        is_unicast, key);
        return 0;
}

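/*
 * Example (illustrative sketch, not part of the CDP API): install PN-check
 * state and key material for a peer after key negotiation. All handle names
 * (example_soc, example_vdev, example_peer) and the security type / key
 * contents are placeholders supplied by the caller.
 *
 *      static void example_install_peer_keys(ol_txrx_soc_handle example_soc,
 *                                            struct cdp_vdev *example_vdev,
 *                                            struct cdp_peer *example_peer,
 *                                            enum cdp_sec_type example_sec,
 *                                            uint32_t *example_rx_pn,
 *                                            uint32_t *example_key)
 *      {
 *              // Program the expected receive PN for replay detection
 *              cdp_set_pn_check(example_soc, example_vdev, example_peer,
 *                               example_sec, example_rx_pn);
 *
 *              // Install the pairwise (unicast) key for this peer
 *              cdp_set_key(example_soc, example_peer, true, example_key);
 *      }
 */
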
/**
 * cdp_update_config_parameters(): function to propagate configuration
 *                                 parameters to datapath
 * @soc: opaque soc handle
 * @cfg: configuration handle
 *
 * Return: status: 0 - Success, non-zero: Failure
 */
static inline
QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc,
                                        struct cdp_config_params *cfg)
{
        struct cdp_soc *psoc = (struct cdp_soc *)soc;

        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return 0;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->update_config_parameters)
                return QDF_STATUS_SUCCESS;

        return soc->ops->cmn_drv_ops->update_config_parameters(psoc,
                        cfg);
}

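/*
 * Example (illustrative sketch, not part of the CDP API): push configuration
 * parameters to the datapath at init time. The structure is zero-initialised
 * here; which members of struct cdp_config_params a driver actually sets is
 * platform specific, so the handle name example_soc and the empty structure
 * are placeholders.
 *
 *      static QDF_STATUS example_push_dp_config(ol_txrx_soc_handle example_soc)
 *      {
 *              struct cdp_config_params params = {0};
 *
 *              // Populate the desired datapath configuration fields here.
 *              return cdp_update_config_parameters(example_soc, &params);
 *      }
 */
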
/**
 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: opaque dp handle
 */
static inline void *
cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }

        if (soc->ops->cmn_drv_ops &&
            soc->ops->cmn_drv_ops->get_dp_txrx_handle)
                return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev);

        return NULL;
}

/**
 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @dp_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_dp_txrx_handle)
                return;

        soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl);
}

/**
 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc
 * @soc: opaque soc handle
 *
 * Return: opaque extended dp handle
 */
static inline void *
cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }

        if (soc->ops->cmn_drv_ops &&
            soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle)
                return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle(
                                (struct cdp_soc *)soc);

        return NULL;
}

/**
 * cdp_soc_set_dp_txrx_handle() - set extended dp handle in soc
 * @soc: opaque soc handle
 * @dp_handle: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle)
                return;

        soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc,
                        dp_handle);
}

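/*
 * Example (illustrative sketch, not part of the CDP API): stash an extended
 * datapath context in the soc at attach time and fetch it back later. The
 * context type and handle names are placeholders; the datapath only stores
 * the opaque pointer.
 *
 *      struct example_ext_ctx {
 *              uint32_t refcount;
 *      };
 *
 *      static void example_attach_ext_ctx(ol_txrx_soc_handle example_soc,
 *                                         struct example_ext_ctx *ctx)
 *      {
 *              cdp_soc_set_dp_txrx_handle(example_soc, ctx);
 *      }
 *
 *      static struct example_ext_ctx *
 *      example_get_ext_ctx(ol_txrx_soc_handle example_soc)
 *      {
 *              return (struct example_ext_ctx *)
 *                      cdp_soc_get_dp_txrx_handle(example_soc);
 *      }
 */
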
/**
 * cdp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
 * @soc: opaque soc handle
 * @pdev_handle: data path pdev handle
 * @lmac_id: lmac id
 *
 * Return: void
 */
static inline void
cdp_soc_map_pdev_to_lmac(ol_txrx_soc_handle soc, void *pdev_handle,
                         uint32_t lmac_id)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->map_pdev_to_lmac)
                return;

        soc->ops->cmn_drv_ops->map_pdev_to_lmac((struct cdp_pdev *)pdev_handle,
                        lmac_id);
}

/**
 * cdp_tx_send() - enqueue frame for transmission
 * @soc: soc opaque handle
 * @vdev: VAP device
 * @nbuf: nbuf to be enqueued
 *
 * This API is used by Extended Datapath modules to enqueue frame for
 * transmission
 *
 * Return: void
 */
static inline void
cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->tx_send)
                return;

        soc->ops->cmn_drv_ops->tx_send(vdev, nbuf);
}

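/*
 * Example (illustrative sketch, not part of the CDP API): an extended
 * datapath module handing a frame to the regular transmit path. The handle
 * names example_soc/example_vdev are placeholders; the nbuf is handed to the
 * datapath by this call.
 *
 *      static void example_xmit_frame(ol_txrx_soc_handle example_soc,
 *                                     struct cdp_vdev *example_vdev,
 *                                     qdf_nbuf_t nbuf)
 *      {
 *              cdp_tx_send(example_soc, example_vdev, nbuf);
 *      }
 */
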
/**
 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: pdev_id
 */
static inline
uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc,
                                 struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev)
                return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev);
        return 0;
}

/**
 * cdp_get_vow_config_frm_pdev() - return carrier_vow_config from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: carrier_vow_config
 */
static inline
bool cdp_get_vow_config_frm_pdev(ol_txrx_soc_handle soc,
                                 struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_get_vow_config_frm_pdev)
                return soc->ops->cmn_drv_ops->txrx_get_vow_config_frm_pdev(
                                pdev);
        return 0;
}

/**
 * cdp_pdev_set_chan_noise_floor() - Set channel noise floor to DP layer
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @chan_noise_floor: Channel Noise Floor (in dBm) obtained from control path
 *
 * Return: None
 */
static inline
void cdp_pdev_set_chan_noise_floor(ol_txrx_soc_handle soc,
                                   struct cdp_pdev *pdev,
                                   int16_t chan_noise_floor)
{
        if (soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor)
                return soc->ops->cmn_drv_ops->txrx_pdev_set_chan_noise_floor(
                                pdev, chan_noise_floor);
}

/**
 * cdp_set_nac() - set nac
 * @soc: opaque soc handle
 * @peer: data path peer handle
 *
 */
static inline
void cdp_set_nac(ol_txrx_soc_handle soc,
                 struct cdp_peer *peer)
{
        if (soc->ops->cmn_drv_ops->txrx_set_nac)
                soc->ops->cmn_drv_ops->txrx_set_nac(peer);
}

/**
 * cdp_set_pdev_tx_capture() - set pdev tx_capture
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @val: value of pdev_tx_capture
 *
 * Return: status: 0 - Success, non-zero: Failure
 */
static inline
QDF_STATUS cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc,
                                   struct cdp_pdev *pdev, int val)
{
        if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture)
                return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev,
                                val);
        return QDF_STATUS_SUCCESS;
}

/**
 * cdp_set_pdev_pcp_tid_map() - set pdev pcp-tid-map
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @pcp: pcp value
 * @tid: tid value
 *
 * This API is used to configure the pcp-to-tid mapping for a pdev.
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL if error
 */
static inline
QDF_STATUS cdp_set_pdev_pcp_tid_map(ol_txrx_soc_handle soc,
                                    struct cdp_pdev *pdev,
                                    uint32_t pcp, uint32_t tid)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                return QDF_STATUS_E_INVAL;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_pdev_pcp_tid_map)
                return QDF_STATUS_E_INVAL;

        return soc->ops->cmn_drv_ops->set_pdev_pcp_tid_map(pdev, pcp, tid);
}

/**
 * cdp_set_pdev_tidmap_prty() - set pdev tidmap priority
 * @soc: opaque soc handle
 * @pdev_handle: data path pdev handle
 * @val: priority value
 *
 * This API is used to configure the tidmap priority for a pdev.
 * The tidmap priority decides which mapping, namely DSCP-TID, SVLAN_PCP-TID,
 * CVLAN_PCP-TID will be used.
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL if error
 */
static inline
QDF_STATUS cdp_set_pdev_tidmap_prty(ol_txrx_soc_handle soc,
                                    struct cdp_pdev *pdev_handle,
                                    uint32_t val)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                return QDF_STATUS_E_INVAL;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_pdev_tidmap_prty)
                return QDF_STATUS_E_INVAL;

        return soc->ops->cmn_drv_ops->set_pdev_tidmap_prty(pdev_handle, val);
}

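/*
 * Example (illustrative sketch, not part of the CDP API): program a pdev
 * level PCP-TID mapping and then select the tidmap priority. The handle
 * names and the chosen PCP/TID/priority values are placeholders; the
 * encoding of the priority value is target specific.
 *
 *      static QDF_STATUS example_map_pcp_to_tid(ol_txrx_soc_handle example_soc,
 *                                               struct cdp_pdev *example_pdev)
 *      {
 *              QDF_STATUS status;
 *
 *              // Map PCP 5 to TID 5 on this pdev
 *              status = cdp_set_pdev_pcp_tid_map(example_soc, example_pdev,
 *                                                5, 5);
 *              if (QDF_IS_STATUS_ERROR(status))
 *                      return status;
 *
 *              // Pick which mapping (DSCP vs SVLAN_PCP vs CVLAN_PCP) wins;
 *              // '1' here is only a placeholder priority value.
 *              return cdp_set_pdev_tidmap_prty(example_soc, example_pdev, 1);
 *      }
 */
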
/**
 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id
 * @soc: opaque soc handle
 * @pdev_handle: data path pdev handle
 * @peer_id: data path peer id
 * @peer_mac: peer_mac
 *
 * Return: void
 */
static inline
void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc,
                                   struct cdp_pdev *pdev_handle,
                                   uint32_t peer_id, uint8_t *peer_mac)
{
        if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id)
                soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id(
                                pdev_handle, peer_id, peer_mac);
}

/**
 * cdp_vdev_tx_lock() - acquire lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_lock(ol_txrx_soc_handle soc,
                      struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock)
                soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev);
}

/**
 * cdp_vdev_tx_unlock() - release lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc,
                        struct cdp_vdev *vdev)
{
        if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock)
                soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev);
}

/**
 * cdp_ath_getstats() - get updated athstats
 * @soc: opaque soc handle
 * @dev: dp interface handle
 * @stats: cdp network device stats structure
 * @type: device type pdev/vdev
 *
 * Return: void
 */
static inline void cdp_ath_getstats(ol_txrx_soc_handle soc,
                                    void *dev, struct cdp_dev_stats *stats,
                                    uint8_t type)
{
        if (soc && soc->ops && soc->ops->cmn_drv_ops &&
            soc->ops->cmn_drv_ops->txrx_ath_getstats)
                soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type);
}

/**
 * cdp_set_gid_flag() - set groupid flag
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @mem_status: member status from grp management frame
 * @user_position: user position from grp management frame
 *
 * Return: void
 */
static inline
void cdp_set_gid_flag(ol_txrx_soc_handle soc,
                      struct cdp_pdev *pdev, u_int8_t *mem_status,
                      u_int8_t *user_position)
{
        if (soc->ops->cmn_drv_ops->txrx_set_gid_flag)
                soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status,
                                user_position);
}

/**
 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: fw enhanced stats version
 */
static inline
uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc,
                                            struct cdp_pdev *pdev)
{
        if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version)
                return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(
                                pdev);
        return 0;
}

/**
 * cdp_if_mgmt_drain() - drain management frames queued for a node
 * @soc: opaque soc handle
 * @ni: associated node
 * @force: number of frames in SW queue
 *
 * Return: void
 */
static inline
void cdp_if_mgmt_drain(ol_txrx_soc_handle soc,
                       void *ni, int force)
{
        if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain)
                soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force);
}

/**
 * cdp_peer_map_attach() - CDP API to allocate PEER map memory
 * @soc: opaque soc handle
 * @max_peers: number of peers created in FW
 * @max_ast_index: max number of AST index supported in FW
 * @peer_map_unmap_v2: flag indicates HTT peer map v2 is enabled in FW
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers,
                    uint32_t max_ast_index, bool peer_map_unmap_v2)
{
        if (soc && soc->ops && soc->ops->cmn_drv_ops &&
            soc->ops->cmn_drv_ops->txrx_peer_map_attach)
                return soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc,
                                max_peers,
                                max_ast_index,
                                peer_map_unmap_v2);

        return QDF_STATUS_SUCCESS;
}

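/*
 * Example (illustrative sketch, not part of the CDP API): size the peer map
 * during SoC bring-up using values negotiated with the firmware. The numbers
 * and the handle name below are placeholders; real values come from the
 * target/firmware capabilities.
 *
 *      static QDF_STATUS example_alloc_peer_map(ol_txrx_soc_handle example_soc)
 *      {
 *              uint32_t max_peers = 512;       // from FW capabilities
 *              uint32_t max_ast_index = 1024;  // from FW capabilities
 *              bool peer_map_v2 = true;        // HTT peer map v2 enabled in FW
 *
 *              return cdp_peer_map_attach(example_soc, max_peers,
 *                                         max_ast_index, peer_map_v2);
 *      }
 */
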
/**
 * cdp_pdev_set_ctrl_pdev() - set UMAC ctrl pdev to dp pdev
 * @soc: opaque soc handle
 * @dp_pdev: opaque dp pdev handle
 * @ctrl_pdev: opaque ctrl pdev handle
 *
 * Return: void
 */
static inline void
cdp_pdev_set_ctrl_pdev(ol_txrx_soc_handle soc, struct cdp_pdev *dp_pdev,
                       struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
{
        if (soc && soc->ops && soc->ops->cmn_drv_ops &&
            soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev)
                soc->ops->cmn_drv_ops->txrx_pdev_set_ctrl_pdev(dp_pdev,
                                ctrl_pdev);
}

/**
 * cdp_txrx_classify_and_update() - To classify the packet and update stats
 * @soc: opaque soc handle
 * @vdev: opaque dp vdev handle
 * @skb: network buffer
 * @dir: rx or tx packet
 * @nbuf_class: packet classification object
 *
 * Return: 1 on success else return 0
 */
static inline int
cdp_txrx_classify_and_update(ol_txrx_soc_handle soc,
                             struct cdp_vdev *vdev, qdf_nbuf_t skb,
                             enum txrx_direction dir,
                             struct ol_txrx_nbuf_classify *nbuf_class)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                QDF_BUG(0);
                return 0;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_classify_update)
                return 0;

        return soc->ops->cmn_drv_ops->txrx_classify_update(vdev,
                        skb,
                        dir, nbuf_class);
}

/**
 * cdp_get_dp_capabilities() - get DP capabilities
 * @soc: opaque soc handle
 * @dp_caps: enum of DP capabilities
 *
 * Return: true if the capability is supported, false otherwise
 */
static inline bool
cdp_get_dp_capabilities(struct cdp_soc_t *soc, enum cdp_capabilities dp_caps)
{
        if (soc && soc->ops && soc->ops->cmn_drv_ops &&
            soc->ops->cmn_drv_ops->get_dp_capabilities)
                return soc->ops->cmn_drv_ops->get_dp_capabilities(soc, dp_caps);
        return false;
}

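/*
 * Example (illustrative sketch, not part of the CDP API): feature code
 * probing the datapath before enabling an optional path. The capability
 * enumerator is passed in by the caller since the members of
 * enum cdp_capabilities are defined elsewhere; example_soc is a placeholder.
 *
 *      static bool example_dp_supports(struct cdp_soc_t *example_soc,
 *                                      enum cdp_capabilities cap)
 *      {
 *              if (!cdp_get_dp_capabilities(example_soc, cap))
 *                      return false;   // fall back to the legacy path
 *
 *              return true;
 *      }
 */
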
#ifdef RECEIVE_OFFLOAD
/**
 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer
 * @soc: data path soc handle
 * @rx_ol_flush_cb: rx offload flush callback function pointer
 *
 * register rx offload flush callback function pointer
 *
 * Return: none
 */
static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc,
                                                  void (rx_ol_flush_cb)(void *))
{
        if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
                          "%s invalid instance", __func__);
                return;
        }

        if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb)
                return soc->ops->rx_offld_ops->register_rx_offld_flush_cb(
                                rx_ol_flush_cb);
}

/**
 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function
 * @soc: data path soc handle
 *
 * deregister rx offload flush callback function pointer
 *
 * Return: none
 */
static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
                          "%s invalid instance", __func__);
                return;
        }

        if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb)
                return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb();
}
#endif /* RECEIVE_OFFLOAD */

/**
 * cdp_set_ba_timeout() - set ba aging timeout per AC
 * @soc: pointer to the soc
 * @ac: access category
 * @value: timeout value in millisec
 *
 * Return: void
 */
static inline void cdp_set_ba_timeout(ol_txrx_soc_handle soc,
                                      uint8_t ac, uint32_t value)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout)
                return;

        soc->ops->cmn_drv_ops->txrx_set_ba_aging_timeout(soc, ac, value);
}

/**
 * cdp_get_ba_timeout() - return ba aging timeout per AC
 * @soc: pointer to the soc
 * @ac: access category
 * @value: timeout value in millisec
 *
 * Return: void
 */
static inline void cdp_get_ba_timeout(ol_txrx_soc_handle soc,
                                      uint8_t ac, uint32_t *value)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout)
                return;

        soc->ops->cmn_drv_ops->txrx_get_ba_aging_timeout(soc, ac, value);
}

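/*
 * Example (illustrative sketch, not part of the CDP API): double the BA
 * aging timeout of one access category. The AC index and handle name are
 * placeholders; the numeric AC encoding is defined by the caller's WMM
 * definitions, not by this header.
 *
 *      static void example_stretch_ba_timeout(ol_txrx_soc_handle example_soc,
 *                                             uint8_t ac)
 *      {
 *              uint32_t timeout_ms = 0;
 *
 *              cdp_get_ba_timeout(example_soc, ac, &timeout_ms);
 *              cdp_set_ba_timeout(example_soc, ac, timeout_ms * 2);
 *      }
 */
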
/**
 * cdp_cfg_get() - get cfg for dp enum
 * @soc: pointer to the soc
 * @cfg: cfg enum
 *
 * Return: cfg value
 */
static inline uint32_t cdp_cfg_get(ol_txrx_soc_handle soc, enum cdp_dp_cfg cfg)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                return 0;
        }

        if (!soc->ops->cmn_drv_ops || !soc->ops->cmn_drv_ops->txrx_get_cfg)
                return 0;

        return soc->ops->cmn_drv_ops->txrx_get_cfg(soc, cfg);
}

/**
 * cdp_soc_set_rate_stats_ctx() - set rate stats context in soc
 * @soc: opaque soc handle
 * @ctx: rate stats context
 *
 * Return: void
 */
static inline void
cdp_soc_set_rate_stats_ctx(ol_txrx_soc_handle soc, void *ctx)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_rate_stats_ctx)
                return;

        soc->ops->cmn_drv_ops->set_rate_stats_ctx((struct cdp_soc_t *)soc,
                        ctx);
}

/**
 * cdp_soc_get_rate_stats_ctx() - get rate stats context from soc
 * @soc: opaque soc handle
 *
 * Return: rate stats context
 */
static inline void *
cdp_soc_get_rate_stats_ctx(ol_txrx_soc_handle soc)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return NULL;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->get_rate_stats_ctx)
                return NULL;

        return soc->ops->cmn_drv_ops->get_rate_stats_ctx(soc);
}

/**
 * cdp_peer_flush_rate_stats() - flush peer rate statistics
 * @soc: opaque soc handle
 * @pdev: pdev handle
 * @buf: stats buffer
 *
 * Return: void
 */
static inline void
cdp_peer_flush_rate_stats(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
                          void *buf)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_peer_flush_rate_stats)
                return;

        soc->ops->cmn_drv_ops->txrx_peer_flush_rate_stats(soc, pdev, buf);
}

/**
 * cdp_flush_rate_stats_request() - request flush rate statistics
 * @soc: opaque soc handle
 * @pdev: pdev handle
 *
 * Return: void
 */
static inline void
cdp_flush_rate_stats_request(struct cdp_soc_t *soc, struct cdp_pdev *pdev)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance:", __func__);
                QDF_BUG(0);
                return;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->txrx_flush_rate_stats_request)
                return;

        soc->ops->cmn_drv_ops->txrx_flush_rate_stats_request(soc, pdev);
}

/**
 * cdp_set_vdev_pcp_tid_map() - set vdev pcp-tid-map
 * @soc: opaque soc handle
 * @vdev_handle: data path vdev handle
 * @pcp: pcp value
 * @tid: tid value
 *
 * This API is used to configure the pcp-to-tid mapping for a vdev.
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL if error
 */
static inline
QDF_STATUS cdp_set_vdev_pcp_tid_map(ol_txrx_soc_handle soc,
                                    struct cdp_vdev *vdev_handle,
                                    uint8_t pcp, uint8_t tid)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                return QDF_STATUS_E_INVAL;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_vdev_pcp_tid_map)
                return QDF_STATUS_E_INVAL;

        return soc->ops->cmn_drv_ops->set_vdev_pcp_tid_map(vdev_handle,
                        pcp, tid);
}

/**
 * cdp_set_vdev_tidmap_tbl_id() - set vdev tidmap table id
 * @soc: opaque soc handle
 * @vdev_handle: data path vdev handle
 * @mapid: value of mapid
 *
 * This API is used to configure the table-id of the tid-mapping for a vdev.
 * Table '0' is for using the pdev's pcp-tid mapping and '1' is for using
 * the vdev's pcp-tid mapping.
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL if error
 */
static inline
QDF_STATUS cdp_set_vdev_tidmap_tbl_id(ol_txrx_soc_handle soc,
                                      struct cdp_vdev *vdev_handle,
                                      uint8_t mapid)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                return QDF_STATUS_E_INVAL;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_vdev_tidmap_tbl_id)
                return QDF_STATUS_E_INVAL;

        return soc->ops->cmn_drv_ops->set_vdev_tidmap_tbl_id(vdev_handle,
                        mapid);
}

/**
 * cdp_set_vdev_tidmap_prty() - set vdev tidmap priority
 * @soc: opaque soc handle
 * @vdev_handle: data path vdev handle
 * @prio: tidmap priority value
 *
 * This API is used to configure the tidmap priority for a vdev.
 * The tidmap priority decides which mapping, namely DSCP-TID, SVLAN_PCP-TID,
 * CVLAN_PCP-TID will be used.
 * The vdev tidmap priority will be used only when the tidmap_tbl_id is '1'.
 *
 * Return: QDF_STATUS_SUCCESS if value set successfully
 *         QDF_STATUS_E_INVAL if error
 */
static inline
QDF_STATUS cdp_set_vdev_tidmap_prty(ol_txrx_soc_handle soc,
                                    struct cdp_vdev *vdev_handle, uint8_t prio)
{
        if (!soc || !soc->ops) {
                QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
                          "%s: Invalid Instance", __func__);
                return QDF_STATUS_E_INVAL;
        }

        if (!soc->ops->cmn_drv_ops ||
            !soc->ops->cmn_drv_ops->set_vdev_tidmap_prty)
                return QDF_STATUS_E_INVAL;

        return soc->ops->cmn_drv_ops->set_vdev_tidmap_prty(vdev_handle, prio);
}

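/*
 * Example (illustrative sketch, not part of the CDP API): give one vdev its
 * own PCP-TID mapping instead of inheriting the pdev mapping. The handle
 * names and the PCP/TID/priority values are placeholders.
 *
 *      static QDF_STATUS example_vdev_own_pcp_map(ol_txrx_soc_handle example_soc,
 *                                                 struct cdp_vdev *example_vdev)
 *      {
 *              QDF_STATUS status;
 *
 *              // Table '1' selects the vdev's own pcp-tid mapping
 *              status = cdp_set_vdev_tidmap_tbl_id(example_soc, example_vdev, 1);
 *              if (QDF_IS_STATUS_ERROR(status))
 *                      return status;
 *
 *              // Map PCP 6 to TID 6 for this vdev only
 *              status = cdp_set_vdev_pcp_tid_map(example_soc, example_vdev, 6, 6);
 *              if (QDF_IS_STATUS_ERROR(status))
 *                      return status;
 *
 *              // Vdev tidmap priority is only consulted when tbl_id is '1';
 *              // the encoding of this value is target specific.
 *              return cdp_set_vdev_tidmap_prty(example_soc, example_vdev, 1);
 *      }
 */
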
#endif /* _CDP_TXRX_CMN_H_ */