/*
 * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * @file cdp_txrx_cmn.h
 * @brief Define the host data path converged API functions
 * called by the host control SW and the OS interface module
 */
#ifndef _CDP_TXRX_CMN_H_
#define _CDP_TXRX_CMN_H_

#include "qdf_types.h"
#include "qdf_nbuf.h"
#include "cdp_txrx_ops.h"
#include "cdp_txrx_handle.h"
#include "cdp_txrx_cmn_struct.h"

/******************************************************************************
 *
 * Common Data Path Header File
 *
 *****************************************************************************/

static inline int
cdp_soc_attach_target(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_attach_target)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_soc_attach_target(soc);
}
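
/*
 * Every wrapper in this file follows the same dispatch pattern: validate the
 * converged soc handle, then call through soc->ops->cmn_drv_ops only if the
 * underlying driver registered the corresponding callback. A minimal usage
 * sketch (illustrative only; the 'soc' handle is assumed to come from the
 * platform's soc attach path, and the meaning of the return value is defined
 * by the registered txrx_soc_attach_target op):
 *
 *	int ret;
 *
 *	ret = cdp_soc_attach_target(soc);
 */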

static inline int
cdp_soc_get_nss_cfg(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_soc_get_nss_cfg(soc);
}

static inline void
cdp_soc_set_nss_cfg(ol_txrx_soc_handle soc, uint32_t config)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg)
		return;
	soc->ops->cmn_drv_ops->txrx_soc_set_nss_cfg(soc, config);
}

static inline struct cdp_vdev *
cdp_vdev_attach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_attach)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_vdev_attach(pdev,
			vdev_mac_addr, vdev_id, op_mode);
}

static inline void
cdp_vdev_detach(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	ol_txrx_vdev_delete_cb callback, void *cb_context)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_vdev_detach(vdev,
			callback, cb_context);
}
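
/*
 * Illustrative vdev lifecycle sketch (not a normative sequence): the pdev
 * handle, MAC address and vdev_id below are assumed to come from the caller's
 * attach path, and wlan_op_mode_sta is one of the operating modes declared in
 * cdp_txrx_cmn_struct.h.
 *
 *	struct cdp_vdev *vdev;
 *	uint8_t vdev_mac[QDF_MAC_ADDR_SIZE];
 *
 *	vdev = cdp_vdev_attach(soc, pdev, vdev_mac, vdev_id, wlan_op_mode_sta);
 *	if (vdev)
 *		cdp_vdev_detach(soc, vdev, NULL, NULL);
 */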

static inline int
cdp_pdev_attach_target(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_attach_target)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_pdev_attach_target(pdev);
}

static inline struct cdp_pdev *cdp_pdev_attach
	(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
	HTC_HANDLE htc_pdev, qdf_device_t osdev, uint8_t pdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_attach)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_pdev_attach(soc, ctrl_pdev,
			htc_pdev, osdev, pdev_id);
}

static inline int cdp_pdev_post_attach(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_post_attach)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_pdev_post_attach(pdev);
}

static inline void
cdp_pdev_pre_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_pre_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_pdev_pre_detach(pdev, force);
}

static inline void
cdp_pdev_detach(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, int force)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_pdev_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_pdev_detach(pdev, force);
}

static inline void *cdp_peer_create
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	uint8_t *peer_mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_create)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_peer_create(vdev,
			peer_mac_addr);
}

static inline void cdp_peer_setup
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_setup)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_setup(vdev,
			peer);
}

static inline int cdp_peer_add_ast
	(ol_txrx_soc_handle soc, struct cdp_peer *peer_handle,
	uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type, uint32_t flags)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_add_ast)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_peer_add_ast(soc,
			peer_handle,
			mac_addr,
			type,
			flags);
}

static inline int cdp_peer_update_ast
	(ol_txrx_soc_handle soc, void *ast_handle,
	struct cdp_peer *peer_handle, uint32_t flags)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_update_ast)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_peer_update_ast(soc,
			peer_handle,
			ast_handle,
			flags);
}

static inline void cdp_peer_del_ast
	(ol_txrx_soc_handle soc, void *ast_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_del_ast)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_del_ast(soc, ast_handle);
}

static inline void *cdp_peer_ast_hash_find
	(ol_txrx_soc_handle soc, uint8_t *ast_mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_peer_ast_hash_find(soc,
			ast_mac_addr);
}

static inline uint8_t cdp_peer_ast_get_pdev_id
	(ol_txrx_soc_handle soc, void *ast_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0xff;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id)
		return 0xff;
	return soc->ops->cmn_drv_ops->txrx_peer_ast_get_pdev_id(soc,
			ast_handle);
}

static inline uint8_t cdp_peer_ast_get_next_hop
	(ol_txrx_soc_handle soc, void *ast_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0xff;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop)
		return 0xff;
	return soc->ops->cmn_drv_ops->txrx_peer_ast_get_next_hop(soc,
			ast_handle);
}

static inline void cdp_peer_ast_set_type
	(ol_txrx_soc_handle soc, void *ast_handle,
	enum cdp_txrx_ast_entry_type type)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_ast_set_type)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_ast_set_type(soc, ast_handle, type);
}

static inline void cdp_peer_teardown
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, void *peer)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_teardown)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_teardown(vdev, peer);
}

static inline void
cdp_peer_delete(ol_txrx_soc_handle soc, void *peer, uint32_t bitmap)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_peer_delete)
		return;
	soc->ops->cmn_drv_ops->txrx_peer_delete(peer, bitmap);
}
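
/*
 * Illustrative peer lifecycle sketch: create the datapath peer, complete the
 * post-association setup, and delete it on disassociation. The peer MAC
 * pointer and the delete bitmap value are caller-supplied assumptions.
 *
 *	void *peer;
 *
 *	peer = cdp_peer_create(soc, vdev, peer_mac);
 *	if (peer) {
 *		cdp_peer_setup(soc, vdev, peer);
 *		...
 *		cdp_peer_delete(soc, peer, 0);
 *	}
 */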

static inline int
cdp_set_monitor_mode(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	uint8_t smart_monitor)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_monitor_mode)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_set_monitor_mode(vdev,
			smart_monitor);
}

static inline void
cdp_set_curchan(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev,
	uint32_t chan_mhz)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_curchan)
		return;
	soc->ops->cmn_drv_ops->txrx_set_curchan(pdev, chan_mhz);
}

static inline void
cdp_set_privacy_filters(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	void *filter, uint32_t num)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_set_privacy_filters)
		return;
	soc->ops->cmn_drv_ops->txrx_set_privacy_filters(vdev,
			filter, num);
}

static inline int
cdp_set_monitor_filter(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	struct cdp_monitor_filter *filter_val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->mon_ops ||
	    !soc->ops->mon_ops->txrx_set_advance_monitor_filter)
		return 0;
	return soc->ops->mon_ops->txrx_set_advance_monitor_filter(pdev,
			filter_val);
}

/******************************************************************************
 * Data Interface (B Interface)
 *****************************************************************************/

static inline void
cdp_vdev_register(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	void *osif_vdev, struct ol_txrx_ops *txrx_ops)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_vdev_register)
		return;
	soc->ops->cmn_drv_ops->txrx_vdev_register(vdev,
			osif_vdev, txrx_ops);
}
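
/*
 * Illustrative registration sketch: the OS shim hands its per-netdev context
 * and its callback table to the datapath. The callback members of
 * struct ol_txrx_ops are declared in cdp_txrx_ops.h; only a zero-initialised
 * table is shown here, and 'osif_dev' is a hypothetical OS-shim pointer.
 *
 *	struct ol_txrx_ops txrx_ops = {0};
 *
 *	(fill in the OS-facing tx/rx callbacks declared in cdp_txrx_ops.h)
 *	cdp_vdev_register(soc, vdev, osif_dev, &txrx_ops);
 */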

static inline int
cdp_mgmt_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	qdf_nbuf_t tx_mgmt_frm, uint8_t type)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_send)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_mgmt_send(vdev,
			tx_mgmt_frm, type);
}

static inline int
cdp_mgmt_send_ext(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	qdf_nbuf_t tx_mgmt_frm, uint8_t type,
	uint8_t use_6mbps, uint16_t chanfreq)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_send_ext)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_mgmt_send_ext
			(vdev, tx_mgmt_frm, type, use_6mbps, chanfreq);
}

static inline void
cdp_mgmt_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	uint8_t type, ol_txrx_mgmt_tx_cb download_cb,
	ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set)
		return;
	soc->ops->cmn_drv_ops->txrx_mgmt_tx_cb_set
			(pdev, type, download_cb, ota_ack_cb, ctxt);
}

static inline int cdp_get_tx_pending(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_tx_pending)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_get_tx_pending(pdev);
}

static inline void
cdp_data_tx_cb_set(ol_txrx_soc_handle soc, struct cdp_vdev *data_vdev,
	ol_txrx_data_tx_cb callback, void *ctxt)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_data_tx_cb_set)
		return;
	soc->ops->cmn_drv_ops->txrx_data_tx_cb_set(data_vdev,
			callback, ctxt);
}

/******************************************************************************
 * Statistics and Debugging Interface (C Interface)
 *****************************************************************************/

/**
 * External Device physical address types
 *
 * Currently, both MAC and IPA uController use the same size addresses
 * and descriptors are exchanged between these two depending on the mode.
 *
 * Rationale: qdf_dma_addr_t is the type used internally on the host for DMA
 * operations. However, external device physical address sizes
 * may be different from host-specific physical address sizes.
 * This calls for the following definitions for target devices
 * (MAC, IPA uc).
 */
#if HTT_PADDR64
typedef uint64_t target_paddr_t;
#else
typedef uint32_t target_paddr_t;
#endif /* HTT_PADDR64 */

static inline int
cdp_aggr_cfg(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	int max_subfrms_ampdu,
	int max_subfrms_amsdu)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_aggr_cfg)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_aggr_cfg(vdev,
			max_subfrms_ampdu, max_subfrms_amsdu);
}

static inline int
cdp_fw_stats_get(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	struct ol_txrx_stats_req *req, bool per_vdev,
	bool response_expected)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_fw_stats_get)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_fw_stats_get(vdev, req,
			per_vdev, response_expected);
}

static inline int
cdp_debug(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, int debug_specs)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_debug)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_debug(vdev, debug_specs);
}

static inline void cdp_fw_stats_cfg(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint8_t cfg_stats_type, uint32_t cfg_val)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_fw_stats_cfg)
		return;
	soc->ops->cmn_drv_ops->txrx_fw_stats_cfg(vdev,
			cfg_stats_type, cfg_val);
}

static inline void cdp_print_level_set(ol_txrx_soc_handle soc, unsigned level)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_print_level_set)
		return;
	soc->ops->cmn_drv_ops->txrx_print_level_set(level);
}

static inline uint8_t *
cdp_get_vdev_mac_addr(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_vdev_mac_addr(vdev);
}

/**
 * cdp_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
 * vdev
 * @vdev: vdev handle
 *
 * Return: Handle to struct qdf_mac_addr
 */
static inline struct qdf_mac_addr *cdp_get_vdev_struct_mac_addr
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_vdev_struct_mac_addr
			(vdev);
}

/**
 * cdp_get_pdev_from_vdev() - Return handle to pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to pdev
 */
static inline struct cdp_pdev *cdp_get_pdev_from_vdev
	(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_pdev_from_vdev(vdev);
}

/**
 * cdp_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
 * @vdev: vdev handle
 *
 * Return: Handle to control pdev
 */
static inline struct cdp_cfg *
cdp_get_ctrl_pdev_from_vdev(ol_txrx_soc_handle soc, struct cdp_vdev *vdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_ctrl_pdev_from_vdev
			(vdev);
}

static inline struct cdp_vdev *
cdp_get_vdev_from_vdev_id(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
	uint8_t vdev_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id)
		return NULL;
	return soc->ops->cmn_drv_ops->txrx_get_vdev_from_vdev_id
			(pdev, vdev_id);
}

static inline void
cdp_soc_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_soc_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_soc_detach((void *)soc);
}

static inline int cdp_addba_requestprocess(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t dialogtoken, uint16_t tid,
	uint16_t batimeout, uint16_t buffersize, uint16_t startseqnum)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_requestprocess)
		return 0;
	return soc->ops->cmn_drv_ops->addba_requestprocess(peer_handle,
			dialogtoken, tid, batimeout, buffersize, startseqnum);
}

static inline void cdp_addba_responsesetup(ol_txrx_soc_handle soc,
	void *peer_handle, uint8_t tid, uint8_t *dialogtoken,
	uint16_t *statuscode, uint16_t *buffersize, uint16_t *batimeout)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->addba_responsesetup)
		return;
	soc->ops->cmn_drv_ops->addba_responsesetup(peer_handle, tid,
			dialogtoken, statuscode, buffersize, batimeout);
}

static inline int cdp_delba_process(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t reasoncode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->delba_process)
		return 0;
	return soc->ops->cmn_drv_ops->delba_process(peer_handle,
			tid, reasoncode);
}

static inline void cdp_set_addbaresponse(ol_txrx_soc_handle soc,
	void *peer_handle, int tid, uint16_t statuscode)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_addba_response)
		return;
	soc->ops->cmn_drv_ops->set_addba_response(peer_handle, tid, statuscode);
}

/**
 * cdp_get_peer_mac_addr_frm_id() - return vdev id and peer mac address
 * @soc: SOC handle
 * @peer_id: peer id of the peer for which mac_address is required
 * @mac_addr: reference to mac address
 *
 * Return: vdev_id of the vap
 */
static inline uint8_t
cdp_get_peer_mac_addr_frm_id(ol_txrx_soc_handle soc, uint16_t peer_id,
	uint8_t *mac_addr)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return CDP_INVALID_VDEV_ID;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id)
		return CDP_INVALID_VDEV_ID;
	return soc->ops->cmn_drv_ops->get_peer_mac_addr_frm_id(soc,
			peer_id, mac_addr);
}

/**
 * cdp_set_vdev_dscp_tid_map(): function to set DSCP-tid map in the vap
 * @vdev: vdev handle
 * @map_id: id of the tid map
 *
 * Return: void
 */
static inline void cdp_set_vdev_dscp_tid_map(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, uint8_t map_id)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map)
		return;
	soc->ops->cmn_drv_ops->set_vdev_dscp_tid_map(vdev,
			map_id);
}

/**
 * cdp_set_pdev_dscp_tid_map(): function to change tid values in DSCP-tid map
 * @pdev: pdev handle
 * @map_id: id of the tid map
 * @tos: index value in map that needs to be changed
 * @tid: tid value passed by user
 *
 * Return: void
 */
static inline void cdp_set_pdev_dscp_tid_map(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, uint8_t map_id, uint8_t tos, uint8_t tid)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map)
		return;
	soc->ops->cmn_drv_ops->set_pdev_dscp_tid_map(pdev,
			map_id, tos, tid);
}
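
/*
 * Illustrative DSCP-TID configuration sketch: program one entry of map 0 on
 * the pdev, then bind that map to a vdev. The TOS index and TID values are
 * arbitrary example numbers, not recommended settings.
 *
 *	cdp_set_pdev_dscp_tid_map(soc, pdev, 0, 46, 6);
 *	cdp_set_vdev_dscp_tid_map(soc, vdev, 0);
 */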

/**
 * cdp_flush_cache_rx_queue() - flush cache rx queue frame
 *
 * Return: None
 */
static inline void cdp_flush_cache_rx_queue(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->flush_cache_rx_queue)
		return;
	soc->ops->cmn_drv_ops->flush_cache_rx_queue();
}

/**
 * cdp_txrx_stats_request(): function to map to host and firmware statistics
 * @soc: soc handle
 * @vdev: virtual device
 * @req: stats request container
 *
 * return: status
 */
static inline
int cdp_txrx_stats_request(ol_txrx_soc_handle soc, struct cdp_vdev *vdev,
	struct cdp_txrx_stats_req *req)
{
	if (!soc || !soc->ops || !soc->ops->cmn_drv_ops || !req) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_ASSERT(0);
		return 0;
	}
	if (soc->ops->cmn_drv_ops->txrx_stats_request)
		return soc->ops->cmn_drv_ops->txrx_stats_request(vdev, req);
	return 0;
}
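
/*
 * Illustrative stats request sketch: zero-initialise the request, fill in the
 * fields defined for struct cdp_txrx_stats_req in cdp_txrx_cmn_struct.h
 * (which fields matter depends on the requested statistic), then hand it to
 * the datapath.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	(populate the request fields defined in cdp_txrx_cmn_struct.h)
 *	cdp_txrx_stats_request(soc, vdev, &req);
 */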

/**
 * cdp_txrx_intr_attach(): function to attach and configure interrupt
 * @soc: soc handle
 */
static inline QDF_STATUS cdp_txrx_intr_attach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_intr_attach)
		return 0;
	return soc->ops->cmn_drv_ops->txrx_intr_attach(soc);
}

/**
 * cdp_txrx_intr_detach(): function to detach interrupt
 * @soc: soc handle
 */
static inline void cdp_txrx_intr_detach(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->txrx_intr_detach)
		return;
	soc->ops->cmn_drv_ops->txrx_intr_detach(soc);
}

/**
 * cdp_display_stats(): function to map to dump stats
 * @soc: soc handle
 * @value: statistics option
 */
static inline QDF_STATUS
cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value,
	enum qdf_stats_verbosity_level level)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->display_stats)
		return 0;
	return soc->ops->cmn_drv_ops->display_stats(soc, value, level);
}

/**
 * cdp_set_pn_check(): function to set pn check
 * @soc: soc handle
 * @vdev: virtual device
 * @peer_handle: peer handle
 * @sec_type: security type
 * @rx_pn: receive pn
 */
static inline int cdp_set_pn_check(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev, struct cdp_peer *peer_handle,
	enum cdp_sec_type sec_type, uint32_t *rx_pn)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_pn_check)
		return 0;
	soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle,
			sec_type, rx_pn);
	return 0;
}

/**
 * cdp_update_config_parameters(): function to propagate configuration
 * parameters to datapath
 * @soc: opaque soc handle
 * @cfg: configuration handle
 *
 * Return: status: 0 - Success, non-zero: Failure
 */
static inline
QDF_STATUS cdp_update_config_parameters(ol_txrx_soc_handle soc,
	struct cdp_config_params *cfg)
{
	struct cdp_soc *psoc = (struct cdp_soc *)soc;

	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->update_config_parameters)
		return QDF_STATUS_SUCCESS;
	return soc->ops->cmn_drv_ops->update_config_parameters(psoc,
			cfg);
}
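
/*
 * Illustrative configuration sketch: capability settings (e.g. TSO/LRO
 * enables) are carried in struct cdp_config_params, defined in
 * cdp_txrx_cmn_struct.h; the individual field names are not repeated here.
 *
 *	struct cdp_config_params params = {0};
 *
 *	(set the desired capability fields)
 *	if (cdp_update_config_parameters(soc, &params) != QDF_STATUS_SUCCESS)
 *		(handle the failure)
 */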

/**
 * cdp_pdev_get_dp_txrx_handle() - get advanced dp handle from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: opaque dp handle
 */
static inline void *
cdp_pdev_get_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return 0;
	}
	if (soc->ops->cmn_drv_ops->get_dp_txrx_handle)
		return soc->ops->cmn_drv_ops->get_dp_txrx_handle(pdev);
	return 0;
}

/**
 * cdp_pdev_set_dp_txrx_handle() - set advanced dp handle in pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @dp_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_pdev_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *pdev, void *dp_hdl)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_dp_txrx_handle)
		return;
	soc->ops->cmn_drv_ops->set_dp_txrx_handle(pdev, dp_hdl);
}
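
/*
 * Illustrative handle-stashing sketch: an advanced datapath module can park
 * its private context on the pdev and retrieve it later. 'my_ctx' is a
 * hypothetical caller-owned pointer.
 *
 *	cdp_pdev_set_dp_txrx_handle(soc, pdev, my_ctx);
 *	...
 *	my_ctx = cdp_pdev_get_dp_txrx_handle(soc, pdev);
 */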

/*
 * cdp_soc_get_dp_txrx_handle() - get extended dp handle from soc
 * @soc: opaque soc handle
 *
 * Return: opaque extended dp handle
 */
static inline void *
cdp_soc_get_dp_txrx_handle(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return NULL;
	}
	if (soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle)
		return soc->ops->cmn_drv_ops->get_soc_dp_txrx_handle(
				(struct cdp_soc *) soc);
	return NULL;
}

/**
 * cdp_soc_set_dp_txrx_handle() - set advanced dp handle in soc
 * @soc: opaque soc handle
 * @dp_hdl: opaque pointer for dp_txrx_handle
 *
 * Return: void
 */
static inline void
cdp_soc_set_dp_txrx_handle(ol_txrx_soc_handle soc, void *dp_handle)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle)
		return;
	soc->ops->cmn_drv_ops->set_soc_dp_txrx_handle((struct cdp_soc *)soc,
			dp_handle);
}

/**
 * cdp_tx_send() - enqueue frame for transmission
 * @soc: soc opaque handle
 * @vdev: VAP device
 * @nbuf: nbuf to be enqueued
 *
 * This API is used by Extended Datapath modules to enqueue frame for
 * transmission
 *
 * Return: void
 */
static inline void
cdp_tx_send(ol_txrx_soc_handle soc, struct cdp_vdev *vdev, qdf_nbuf_t nbuf)
{
	if (!soc || !soc->ops) {
		QDF_TRACE(QDF_MODULE_ID_CDP, QDF_TRACE_LEVEL_DEBUG,
			"%s: Invalid Instance:", __func__);
		QDF_BUG(0);
		return;
	}
	if (!soc->ops->cmn_drv_ops ||
	    !soc->ops->cmn_drv_ops->tx_send)
		return;
	soc->ops->cmn_drv_ops->tx_send(vdev, nbuf);
}
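
/*
 * Illustrative transmit sketch: an extended datapath module hands a prepared
 * nbuf to the datapath for transmission on the given vdev. Allocation and
 * framing of the nbuf are the caller's responsibility and are not shown.
 *
 *	qdf_nbuf_t nbuf;
 *
 *	(allocate and fill nbuf)
 *	cdp_tx_send(soc, vdev, nbuf);
 */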

/*
 * cdp_get_pdev_id_frm_pdev() - return pdev_id from pdev
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 * Return: pdev_id
 */
static inline
uint8_t cdp_get_pdev_id_frm_pdev(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev)
{
	if (soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev)
		return soc->ops->cmn_drv_ops->txrx_get_pdev_id_frm_pdev(pdev);
	return 0;
}

/**
 * cdp_set_nac() - set nac
 * @soc: opaque soc handle
 * @peer: data path peer handle
 *
 */
static inline
void cdp_set_nac(ol_txrx_soc_handle soc,
	struct cdp_peer *peer)
{
	if (soc->ops->cmn_drv_ops->txrx_set_nac)
		soc->ops->cmn_drv_ops->txrx_set_nac(peer);
}

/**
 * cdp_set_pdev_tx_capture() - set pdev tx_capture
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @val: value of pdev_tx_capture
 *
 * Return: void
 */
static inline
void cdp_set_pdev_tx_capture(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, int val)
{
	if (soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture)
		return soc->ops->cmn_drv_ops->txrx_set_pdev_tx_capture(pdev,
				val);
}

/**
 * cdp_get_peer_mac_from_peer_id() - get peer mac addr from peer id
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @peer_id: data path peer id
 * @peer_mac: peer_mac
 *
 * Return: void
 */
static inline
void cdp_get_peer_mac_from_peer_id(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev_handle,
	uint32_t peer_id, uint8_t *peer_mac)
{
	if (soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id)
		soc->ops->cmn_drv_ops->txrx_get_peer_mac_from_peer_id(
				pdev_handle, peer_id, peer_mac);
}

/**
 * cdp_vdev_tx_lock() - acquire lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_lock(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev)
{
	if (soc->ops->cmn_drv_ops->txrx_vdev_tx_lock)
		soc->ops->cmn_drv_ops->txrx_vdev_tx_lock(vdev);
}

/**
 * cdp_vdev_tx_unlock() - release lock
 * @soc: opaque soc handle
 * @vdev: data path vdev handle
 *
 * Return: void
 */
static inline
void cdp_vdev_tx_unlock(ol_txrx_soc_handle soc,
	struct cdp_vdev *vdev)
{
	if (soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock)
		soc->ops->cmn_drv_ops->txrx_vdev_tx_unlock(vdev);
}
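
/*
 * Illustrative locking sketch: callers that need to serialise transmit-path
 * work on a vdev bracket it with the vdev tx lock wrappers.
 *
 *	cdp_vdev_tx_lock(soc, vdev);
 *	(transmit-path work that must be serialised)
 *	cdp_vdev_tx_unlock(soc, vdev);
 */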

/**
 * cdp_ath_getstats() - get updated athstats
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @stats: interface statistics (struct net_device_stats or
 *         struct rtnl_link_stats64, depending on the kernel version)
 *
 * Return: void
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
static inline void cdp_ath_getstats(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, struct net_device_stats *stats)
#else
static inline void cdp_ath_getstats(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, struct rtnl_link_stats64 *stats)
#endif
{
	if (soc && soc->ops && soc->ops->cmn_drv_ops->txrx_ath_getstats)
		soc->ops->cmn_drv_ops->txrx_ath_getstats(pdev, stats);
}

/**
 * cdp_set_gid_flag() - set groupid flag
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 * @mem_status: member status from grp management frame
 * @user_position: user position from grp management frame
 *
 * Return: void
 */
static inline
void cdp_set_gid_flag(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev, u_int8_t *mem_status,
	u_int8_t *user_position)
{
	if (soc->ops->cmn_drv_ops->txrx_set_gid_flag)
		soc->ops->cmn_drv_ops->txrx_set_gid_flag(pdev, mem_status,
				user_position);
}

/**
 * cdp_fw_supported_enh_stats_version() - returns the fw enhanced stats version
 * @soc: opaque soc handle
 * @pdev: data path pdev handle
 *
 */
static inline
uint32_t cdp_fw_supported_enh_stats_version(ol_txrx_soc_handle soc,
	struct cdp_pdev *pdev)
{
	if (soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version)
		return soc->ops->cmn_drv_ops->txrx_fw_supported_enh_stats_version(pdev);
	return 0;
}

/**
 * cdp_if_mgmt_drain() - drain pending management frames for a node
 * @soc: opaque soc handle
 * @ni: associated node
 * @force: number of frames in SW queue
 *
 * Return: void
 */
static inline
void cdp_if_mgmt_drain(ol_txrx_soc_handle soc,
	void *ni, int force)
{
	if (soc->ops->cmn_drv_ops->txrx_if_mgmt_drain)
		soc->ops->cmn_drv_ops->txrx_if_mgmt_drain(ni, force);
}

#endif /* _CDP_TXRX_CMN_H_ */