
/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file cdp_txrx_cmn.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_H_
#define _CDP_TXRX_CMN_H_

#include "qdf_types.h"
#include "qdf_nbuf.h"
#include "cdp_txrx_ops.h"
#include "cdp_txrx_handle.h"
#include "cdp_txrx_cmn_struct.h"

/******************************************************************************
 *
 * Common Data Path Header File
 *
 *****************************************************************************/
static inline int
cdp_soc_attach_target(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_attach_target)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
}

static inline int
cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc);
}

static inline void
cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg)
		return;
	soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config);
}

static inline struct cdp_vdev *
cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
		uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_attach)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev,
			vdev_mac_addr, vdev_id, op_mode);
}
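
/*
 * Example (illustrative sketch only, not part of the converged API): a
 * typical vdev creation call from the OS interface layer. The handle and
 * MAC variable names below are placeholders assumed for the sketch.
 *
 *	struct cdp_vdev *vdev;
 *	uint8_t self_mac[QDF_MAC_ADDR_SIZE];
 *
 *	vdev = cdp_vdev_attach(soc, pdev, self_mac, vdev_id,
 *			       wlan_op_mode_sta);
 *	if (!vdev)
 *		return QDF_STATUS_E_FAILURE;
 */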
#ifndef CONFIG_WIN
/**
 * cdp_flow_pool_map() - create flow pool for vdev
 * @soc: data path soc handle
 * @pdev: data path pdev handle
 * @vdev_id: vdev_id corresponding to vdev start
 *
 * Create per vdev flow pool.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS cdp_flow_pool_map(ol_txrx_soc_handle soc,
					   struct cdp_pdev *pdev,
					   uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return QDF_STATUS_E_INVAL;
	}
	if (!soc->ops->flowctl_ops ||
	    !soc->ops->flowctl_ops->flow_pool_map_handler)
		return QDF_STATUS_E_INVAL;
	return soc->ops->flowctl_ops->flow_pool_map_handler(soc, pdev, vdev_id);
}

/**
 * cdp_flow_pool_unmap() - delete flow pool
 * @soc: data path soc handle
 * @pdev: data path pdev handle
 * @vdev_id: vdev_id corresponding to vdev start
 *
 * Delete per vdev flow pool.
 *
 * Return: none
 */
static inline void cdp_flow_pool_unmap(ol_txrx_soc_handle soc,
				       struct cdp_pdev *pdev, uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->flowctl_ops ||
	    !soc->ops->flowctl_ops->flow_pool_unmap_handler)
		return;
	return soc->ops->flowctl_ops->flow_pool_unmap_handler(soc, pdev,
							      vdev_id);
}
#endif
static inline void
cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev,
			callback, cb_context);
}

static inline int
cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_attach_target)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev);
}

static inline struct cdp_pdev *cdp_pdev_attach
	(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
	HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_attach)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
			htc_pdev, osdev, pdev_id);
}

static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc,
				       struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_post_attach)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);
}

static inline void
cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force);
}

static inline void
cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force);
}

static inline void *cdp_peer_create
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	uint8_t *peer_mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_create)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_peer_create(vdev,
			peer_mac_addr);
}
static inline void cdp_peer_setup
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_setup)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_setup(vdev,
			peer);
}
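
/*
 * Example (illustrative sketch): the usual pairing of cdp_peer_create()
 * and cdp_peer_setup() when a station associates. Variable names below
 * are placeholders assumed for the sketch.
 *
 *	void *peer;
 *
 *	peer = cdp_peer_create(soc, vdev, peer_mac);
 *	if (!peer)
 *		return QDF_STATUS_E_FAILURE;
 *	cdp_peer_setup(soc, vdev, peer);
 */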
static inline void *cdp_peer_ast_hash_find
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find(soc,
			ast_mac_addr);
}

static inline int cdp_peer_add_ast
	(ol_txrx_soc_handle soc, struct cdp_peer *peer_handle,
	uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_add_ast)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc,
			peer_handle,
			mac_addr,
			type,
			flags);
}

static inline void cdp_peer_reset_ast
	(ol_txrx_soc_handle soc, uint8_t *wds_macaddr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_reset_ast)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_reset_ast(soc, wds_macaddr);
}

static inline void cdp_peer_reset_ast_table
	(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_reset_ast_table(soc);
}

static inline void cdp_peer_flush_ast_table
	(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_flush_ast_table(soc);
}

static inline int cdp_peer_update_ast
	(ol_txrx_soc_handle soc, uint8_t *wds_macaddr,
	struct cdp_peer *peer_handle, uint32_t flags)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_update_ast)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc,
			peer_handle,
			wds_macaddr,
			flags);
}

static inline void cdp_peer_del_ast
	(ol_txrx_soc_handle soc, void *ast_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_del_ast)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle);
}

static inline uint8_t cdp_peer_ast_get_pdev_id
	(ol_txrx_soc_handle soc, void *ast_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0xff;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id)
		return 0xff;
	return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc,
			ast_handle);
}

static inline uint8_t cdp_peer_ast_get_next_hop
	(ol_txrx_soc_handle soc, void *ast_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0xff;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop)
		return 0xff;
	return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc,
			ast_handle);
}

static inline void cdp_peer_ast_set_type
	(ol_txrx_soc_handle soc, void *ast_handle,
	enum cdp_txrx_ast_entry_type type)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type);
}

static inline void cdp_peer_teardown
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_teardown)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer);
}

static inline void
cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_delete)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap);
}

static inline int
cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		     uint8_t smart_monitor)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_monitor_mode)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev,
			smart_monitor);
}

static inline void
cdp_set_curchan(ol_txrx_soc_handle soc,
		struct cdp_pdev *pdev,
		uint32_t chan_mhz)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_curchan)
		return;
	soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz);
}

static inline void
cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
			void *filter, uint32_t num)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_privacy_filters)
		return;
	soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev,
			filter, num);
}

static inline int
cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
		       struct cdp_monitor_filter *filter_val)
{
	if (soc->ops->mon_ops->txrx_set_advance_monitor_filter)
		return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev,
				filter_val);
	return 0;
}
/******************************************************************************
 * Data Interface (B Interface)
 *****************************************************************************/
static inline void
cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		  void *osif_vdev, struct ol_txrx_ops *txrx_ops)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_register)
		return;
	soc->ops->cmn_drv_ops->txrx_vdev_register(vdev,
			osif_vdev, txrx_ops);
}
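
/*
 * Example (illustrative sketch): registering the OS shim with the data
 * path. The shim normally fills the ol_txrx_ops instance with its tx/rx
 * entry points before this call; only the registration step is shown and
 * the variable names are placeholders.
 *
 *	struct ol_txrx_ops txrx_ops = {0};
 *
 *	cdp_vdev_register(soc, vdev, osif_priv, &txrx_ops);
 */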
static inline int
cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	      qdf_nbuf_t tx_mgmt_frm, uint8_t type)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_send)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev,
			tx_mgmt_frm, type);
}

static inline int
cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		  qdf_nbuf_t tx_mgmt_frm, uint8_t type,
		  uint8_t use_6mbps, uint16_t chanfreq)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext
			(vdev, tx_mgmt_frm, type, use_6mbps, chanfreq);
}

static inline void
cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
		   uint8_t type, ol_txrx_mgmt_tx_cb download_cb,
		   ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set)
		return;
	soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set
			(pdev, type, download_cb, ota_ack_cb, ctxt);
}

static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc,
				     struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_tx_pending)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
}

static inline void
cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev,
		   ol_txrx_data_tx_cb callback, void *ctxt)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set)
		return;
	soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev,
			callback, ctxt);
}
/******************************************************************************
 * Statistics and Debugging Interface (C Interface)
 *****************************************************************************/
/**
 * External Device physical address types
 *
 * Currently, both MAC and IPA uController use the same size addresses
 * and descriptors are exchanged between these two depending on the mode.
 *
 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA
 *            operations. However, external device physical address sizes
 *            may be different from host-specific physical address sizes.
 *            This calls for the following definitions for target devices
 *            (MAC, IPA uc).
 */
#if HTT_PADDR64
typedef uint64_t target_paddr_t;
#else
typedef uint32_t target_paddr_t;
#endif /* HTT_PADDR64 */
static inline int
cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	     int max_subfrms_ampdu,
	     int max_subfrms_amsdu)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_aggr_cfg)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev,
			max_subfrms_ampdu, max_subfrms_amsdu);
}

static inline int
cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
		 struct ol_txrx_stats_req *req, bool per_vdev,
		 bool response_expected)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_fw_stats_get)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req,
			per_vdev, response_expected);
}

static inline int
cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_debug)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs);
}

static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg)
		return;
	soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev,
			cfg_stats_type, cfg_val);
}

static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_print_level_set)
		return;
	soc->ops->cmn_drv_ops->txrx_print_level_set(level);
}

static inline uint8_t *
cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev);
}
/**
 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
 * vdev
 * @vdev: vdev handle
 *
 * Return: Handle to struct qdf_mac_addr
 */
static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr
			(vdev);
}

/**
 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to pdev
 */
static inline struct cdp_pdev *cdp_get_pdev_from_vdev
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev);
}

/**
 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to control pdev
 */
static inline struct cdp_cfg *
cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev
			(vdev);
}

static inline struct cdp_vdev *
cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
			  uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id
			(pdev, vdev_id);
}

static inline void
cdp_soc_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc);
}

static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t dialogtoken, uint16_t tid,
	uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_requestprocess)
		return 0;
	return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
			dialogtoken, tid, batimeout, buffersize, startseqnum);
}

static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
	uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_responsesetup)
		return;
	soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
			dialogtoken, statuscode, buffersize, batimeout);
}

static inline int cdp_delba_process(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t reasoncode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;
	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
			tid, reasoncode);
}

static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;
	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}
/**
 * cdp_get_peer_mac_addr_frm_id() - return vdev id and peer mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
			     uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;
	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
			peer_id, mac_addr);
}
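
/*
 * Example (illustrative sketch): resolving a firmware-reported peer_id to
 * its MAC address and owning vdev. peer_id and the local variables are
 * placeholders assumed for the sketch.
 *
 *	uint8_t mac[QDF_MAC_ADDR_SIZE];
 *	uint8_t vdev_id;
 *
 *	vdev_id = cdp_get_peer_mac_addr_frm_id(soc, peer_id, mac);
 *	if (vdev_id == CDP_INVALID_VDEV_ID)
 *		return;
 */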
/**
 * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
		return;
	soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
			map_id);
}

/**
 * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
		struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
		return;
	soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
			map_id, tos, tid);
}
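
/*
 * Example (illustrative sketch): updating one entry of DSCP-TID map 0 on
 * the pdev and attaching that map to a vdev. Treating the tos argument as
 * the raw DSCP value (46 steered to TID 6 here) is an assumption of this
 * sketch; the index convention is owned by the underlying data path.
 *
 *	cdp_set_pdev_dscp_tid_map(soc, pdev, 0, 46, 6);
 *	cdp_set_vdev_dscp_tid_map(soc, vdev, 0);
 */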
/**
 * cdp_flush_cache_rx_queue() - flush cache rx queue frame
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
		return;
	soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats_request(): function to map to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @req: stats request container
 *
 * return: status
 */
static inline
int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
			   struct cdp_txrx_stats_req *req)
{
	if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_ASSERT(0);
		return 0;
	}
	if (soc->ops->cmn_drv_ops->txrx_stats_request)
		return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req);
	return 0;
}

/**
 * cdp_txrx_intr_attach(): function to attach and configure interrupt
 * @soc: soc handle
 */
static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_intr_attach)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_intr_attach(soc);
}

/**
 * cdp_txrx_intr_detach(): function to detach interrupt
 * @soc: soc handle
 */
static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_intr_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_intr_detach(soc);
}

/**
 * cdp_display_stats(): function to map to dump stats
 * @soc: soc handle
 * @value: statistics option
 */
static inline QDF_STATUS
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value,
		  enum qdf_stats_verbosity_level level)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->display_stats)
		return 0;
	return soc->ops->cmn_drv_ops->display_stats(soc, value, level);
}
/**
 * cdp_set_pn_check(): function to set pn check
 * @soc: soc handle
 * @vdev: vdev handle
 * @peer_handle: peer handle
 * @sec_type: security type
 * @rx_pn: receive pn
 */
static inline int cdp_set_pn_check(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, struct cdp_peer *peer_handle,
	enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pn_check)
		return 0;
	soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle,
			sec_type, rx_pn);
	return 0;
}
/**
 * cdp_update_config_parameters(): function to propagate configuration
 *                                 parameters to datapath
 * @soc: opaque soc handle
 * @cfg: configuration handle
 *
 * Return: status: 0 - Success, non-zero: Failure
 */
static inline
QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc,
					struct cdp_config_params *cfg)
{
	struct cdp_soc *psoc = (struct cdp_soc *)soc;

	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->update_config_parameters)
		return QDF_STATUS_SUCCESS;
	return soc->ops->cmn_drv_ops->update_config_parameters(psoc,
			cfg);
}

/**
 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: opaque dp handle
 */
static inline void *
cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (soc->ops->cmn_drv_ops->get_dp_txrx_handle)
		return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev);
	return 0;
}

/**
 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @dp_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_dp_txrx_handle)
		return;
	soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl);
}
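
/*
 * Example (illustrative sketch): stashing a module-private context in the
 * pdev and fetching it back later. dp_ctx is a placeholder for whatever
 * structure the calling module owns.
 *
 *	cdp_pdev_set_dp_txrx_handle(soc, pdev, dp_ctx);
 *	dp_ctx = cdp_pdev_get_dp_txrx_handle(soc, pdev);
 */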
/*
 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc
 * @soc: opaque soc handle
 *
 * Return: opaque extended dp handle
 */
static inline void *
cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle)
		return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle(
				(struct cdp_soc *)soc);
	return NULL;
}
/**
 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc
 * @soc: opaque soc handle
 * @dp_handle: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle)
		return;
	soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc,
			dp_handle);
}
/**
 * cdp_tx_send() - enqueue frame for transmission
 * @soc: soc opaque handle
 * @vdev: VAP device
 * @nbuf: nbuf to be enqueued
 *
 * This API is used by Extended Datapath modules to enqueue frame for
 * transmission
 *
 * Return: void
 */
static inline void
cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->tx_send)
		return;
	soc->ops->cmn_drv_ops->tx_send(vdev, nbuf);
}
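
/*
 * Example (illustrative sketch): an extended data path module handing an
 * already-built frame to the converged transmit path. nbuf is assumed to
 * be a fully formed qdf_nbuf_t owned by the caller at this point.
 *
 *	cdp_tx_send(soc, vdev, nbuf);
 */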
/*
 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: pdev_id
 */
static inline
uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc,
				 struct cdp_pdev *pdev)
{
	if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev)
		return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev);
	return 0;
}

/**
 * cdp_set_nac() - set nac
 * @soc: opaque soc handle
 * @peer: data path peer handle
 *
 */
static inline
void cdp_set_nac(ol_txrx_soc_handle soc,
		 struct cdp_peer *peer)
{
	if (soc->ops->cmn_drv_ops->txrx_set_nac)
		soc->ops->cmn_drv_ops->txrx_set_nac(peer);
}

/**
 * cdp_set_pdev_tx_capture() - set pdev tx_capture
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @val: value of pdev_tx_capture
 *
 * Return: void
 */
static inline
void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc,
			     struct cdp_pdev *pdev, int val)
{
	if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture)
		return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev,
				val);
}

/**
 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @peer_id: data path peer id
 * @peer_mac: peer_mac
 *
 * Return: void
 */
static inline
void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc,
				   struct cdp_pdev *pdev_handle,
				   uint32_t peer_id, uint8_t *peer_mac)
{
	if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id)
		soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id(
				pdev_handle, peer_id, peer_mac);
}

/**
 * cdp_vdev_tx_lock() - acquire lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_lock(ol_txrx_soc_handle soc,
		      struct cdp_vdev *vdev)
{
	if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock)
		soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev);
}

/**
 * cdp_vdev_tx_unlock() - release lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc,
			struct cdp_vdev *vdev)
{
	if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock)
		soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev);
}
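
/*
 * Example (illustrative sketch): bracketing a transmit burst with the
 * per-vdev tx lock where the underlying data path provides one. Both
 * calls are harmless no-ops if the lock hooks are not registered.
 *
 *	cdp_vdev_tx_lock(soc, vdev);
 *	cdp_tx_send(soc, vdev, nbuf);
 *	cdp_vdev_tx_unlock(soc, vdev);
 */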
/**
 * cdp_ath_getstats() - get updated athstats
 * @soc: opaque soc handle
 * @dev: dp interface handle
 * @stats: cdp network device stats structure
 * @type: device type pdev/vdev
 *
 * Return: void
 */
static inline void cdp_ath_getstats(ol_txrx_soc_handle soc,
		void *dev, struct cdp_dev_stats *stats,
		uint8_t type)
{
	if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats)
		soc->ops->cmn_drv_ops->txrx_ath_getstats(dev, stats, type);
}

/**
 * cdp_set_gid_flag() - set groupid flag
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @mem_status: member status from grp management frame
 * @user_position: user position from grp management frame
 *
 * Return: void
 */
static inline
void cdp_set_gid_flag(ol_txrx_soc_handle soc,
		      struct cdp_pdev *pdev, u_int8_t *mem_status,
		      u_int8_t *user_position)
{
	if (soc->ops->cmn_drv_ops->txrx_set_gid_flag)
		soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status,
							 user_position);
}

/**
 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 */
static inline
uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc,
					    struct cdp_pdev *pdev)
{
	if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version)
		return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev);
	return 0;
}
/**
 * cdp_if_mgmt_drain() - drain management frames queued for the interface
 * @soc: opaque soc handle
 * @ni: associated node
 * @force: number of frames in SW queue
 *
 * Return: void
 */
static inline
void cdp_if_mgmt_drain(ol_txrx_soc_handle soc,
		       void *ni, int force)
{
	if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain)
		soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force);
}
static inline void
cdp_peer_map_attach(ol_txrx_soc_handle soc, uint32_t max_peers)
{
	if (soc && soc->ops && soc->ops->cmn_drv_ops &&
	    soc->ops->cmn_drv_ops->txrx_peer_map_attach)
		soc->ops->cmn_drv_ops->txrx_peer_map_attach(soc, max_peers);
}
#ifdef RECEIVE_OFFLOAD
/**
 * cdp_register_rx_offld_flush_cb() - register LRO/GRO flush cb function pointer
 * @soc: data path soc handle
 * @rx_ol_flush_cb: Rx offload flush callback function pointer
 *
 * register rx offload flush callback function pointer
 *
 * Return: none
 */
static inline void cdp_register_rx_offld_flush_cb(ol_txrx_soc_handle soc,
						  void (rx_ol_flush_cb)(void *))
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}
	if (soc->ops->rx_offld_ops->register_rx_offld_flush_cb)
		return soc->ops->rx_offld_ops->register_rx_offld_flush_cb(
				rx_ol_flush_cb);
}
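
/*
 * Example (illustrative sketch): registering a driver-owned flush routine
 * with the receive offload (LRO/GRO) layer. my_rx_flush is a placeholder
 * name; any function matching void (*)(void *) works here.
 *
 *	void my_rx_flush(void *data);
 *
 *	cdp_register_rx_offld_flush_cb(soc, my_rx_flush);
 */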
/**
 * cdp_deregister_rx_offld_flush_cb() - deregister Rx offld flush cb function
 * @soc: data path soc handle
 *
 * deregister rx offload flush callback function pointer
 *
 * Return: none
 */
static inline void cdp_deregister_rx_offld_flush_cb(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->rx_offld_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return;
	}
	if (soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb)
		return soc->ops->rx_offld_ops->deregister_rx_offld_flush_cb();
}
#endif /* RECEIVE_OFFLOAD */

#endif /* _CDP_TXRX_CMN_H_ */